hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
6b47828c318e94a288bc58607d06569b17d055d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include "round.cuh"
#include "utils.h"
#include <cmath>
#include <cstdint>
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <map>
#include <string>
#include <sstream>
//#pragma comment(lib, "hiprand.lib")
using std::map;
using std::string;
using std::stringstream;
#define INDEX_SIZE_IN_BYTES 8
#define EXTRACT_9 0x7fffffffffffffff
#define EXTRACT_8 0x00ffffffffffffff
#define EXTRACT_7 0x0001ffffffffffff
#define EXTRACT_6 0x000003ffffffffff
#define ROTR32(x, n) (((0U + (x)) << (32 - (n))) | ((x) >> (n))) // Assumes that x is uint32_t and 0 < n < 32
#define LOADSCHEDULE(i) \
schedule[i] = (uint32_t)data[i * 4 + 0] << 24 \
| (uint32_t)data[i * 4 + 1] << 16 \
| (uint32_t)data[i * 4 + 2] << 8 \
| (uint32_t)data[i * 4 + 3] << 0;
#define SCHEDULE(i) \
schedule[i] = 0U + schedule[i - 16] + schedule[i - 7] \
+ (ROTR32(schedule[i - 15], 7) ^ ROTR32(schedule[i - 15], 18) ^ (schedule[i - 15] >> 3)) \
+ (ROTR32(schedule[i - 2], 17) ^ ROTR32(schedule[i - 2], 19) ^ (schedule[i - 2] >> 10));
//#define SCHEDULE(i) \
// asm("{\n\t" \
// ".reg .u32 t1;\n\t" \
// ".reg .u32 t2;\n\t" \
// ".reg .u32 t3;\n\t" \
// ".reg .u32 s1;\n\t" \
// ".reg .u32 s2;\n\t" \
// ".reg .u32 s3;\n\t" \
// ".reg .u32 s4;\n\t" \
// "mov.u32 s1, %1;\n\t" \
// "mov.u32 s2, %2;\n\t" \
// "mov.u32 s3, %3;\n\t" \
// "mov.u32 s4, %4;\n\t" \
// "add.u32 t1, s1, s2;\n\t" \
// "shf.r.clamp.b32 t2, s3, s3, 7;\n\t" \
// "shf.r.clamp.b32 t3, s3, s3, 18;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, s3, 3;\n\t" \
// "xor.b32 t2, t2 ,t3;\n\t" \
// "add.u32 t1, t1, t2;\n\t" \
// "shf.r.clamp.b32 t2, s4, s4, 17;\n\t" \
// "shf.r.clamp.b32 t3, s4, s4, 19;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, %4, 10;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "add.u32 t1, t1, t2;\n\t" \
// "mov.u32 %0, t1;\n\t" \
// "}" \
// : "=r"(schedule[i]) : "r"(schedule[i - 16]), "r"(schedule[i - 7]), "r"(schedule[i - 15]), "r"(schedule[i - 2]));
//#define SCHEDULE(i) \
// asm("{\n\t" \
// ".reg .u32 t2;\n\t" \
// ".reg .u32 t3;\n\t" \
// "shf.r.clamp.b32 t2, %3, %3, 7;\n\t" \
// "shf.r.clamp.b32 t3, %3, %3, 18;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, %3, 3;\n\t" \
// "xor.b32 t2, t2 ,t3;\n\t" \
// "add.u32 %0, %0, t2;\n\t" \
// "shf.r.clamp.b32 t2, %4, %4, 17;\n\t" \
// "shf.r.clamp.b32 t3, %4, %4, 19;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, %4, 10;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "add.u32 %0, %0, t2;\n\t" \
// "add.u32 t2, %1, %2;\n\t" \
// "add.u32 %0, %0, t2;\n\t" \
// "}" \
// : "=r"(schedule[i]) : "r"(schedule[i - 16]), "r"(schedule[i - 7]), "r"(schedule[i - 15]), "r"(schedule[i - 2]));
#define ROUND(a, b, c, d, e, f, g, h, i, k) \
h = 0U + h + (ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25)) + (g ^ (e & (f ^ g))) + UINT32_C(k) + schedule[i]; \
d = 0U + d + h; \
h = 0U + h + (ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22)) + ((a & (b | c)) | (b & c));
#define CUDA_CALL(x) {const hipError_t a = (x);if(a!=hipSuccess){printf("\nCUDA Error:%s(err_num=%d)\n",hipGetErrorString(a),a);}}
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0)
//__shared__ uint k[64];
__constant__ char constantAreaPlainCharSet[36];
__shared__ char plainCharSet[384][95];
__shared__ uint state[384][8];
// Orders chains by their end-point index; used when sorting the rainbow
// table so that end points can be binary-searched during lookup.
struct ChainComparator {
__host__ __device__
bool operator()(const struct Chain& lhs, const struct Chain& rhs) {
    const bool lhsComesFirst = lhs.indexE < rhs.indexE;
    return lhsComesFirst;
}
};
// Orders password mappings by their 32-byte hash, lexicographically.
//
// BUG FIX: the previous implementation only broke out of the loop on
// lhs.hash[i] > rhs.hash[i] and otherwise returned true — so it returned
// true for equal hashes and ignored positions where lhs < rhs after a
// later position compared greater. That violates the strict weak ordering
// required by thrust::sort (irreflexivity/antisymmetry), which is
// undefined behavior and can corrupt the sort. This version is a proper
// byte-wise lexicographic "less than".
struct HashCompartor {
__host__ __device__
bool operator()(const struct PasswordMapping& lhs, const struct PasswordMapping& rhs) {
    for (int i = 0; i < 32; i++) {
        if (lhs.hash[i] < rhs.hash[i]) {
            return true;
        }
        if (lhs.hash[i] > rhs.hash[i]) {
            return false;
        }
    }
    // Equal hashes: not "less than" (required for a strict weak ordering).
    return false;
}
};
void QSort(struct PasswordMapping* mappings, uint32_t CHAINS_SIZE) {
thrust::device_ptr<struct PasswordMapping> thrustChainP(mappings);
thrust::sort(thrustChainP, thrustChainP + CHAINS_SIZE, HashCompartor());
}
// Decodes `index` into `plainLength` base-`plainCharsetSize` digits,
// most significant digit first. Note: writes raw digit values into
// `plain`, not characters from a charset.
__device__ void indexToPlain(ulong index, const uint8_t plainLength,
const uint8_t plainCharsetSize, char* plain)
{
    int pos = plainLength;
    while (pos-- > 0) {
        plain[pos] = index % plainCharsetSize;
        index /= plainCharsetSize;
    }
}
// Maps each successive 7-bit slice of `index` (taken from the low bits)
// to a character of `charSet`, filling `plain` from the last position
// backwards so the high bits end up at the front.
__device__ inline void indexToPlain(ulong index, char* plain, const uint8_t plainLength, const char* charSet, const unsigned int charSetSize)
{
    int i = plainLength;
    while (--i >= 0) {
        const unsigned int digit = (unsigned int)(index & 0x7f) % charSetSize;
        plain[i] = charSet[digit];
        index >>= 7;
    }
}
/*__device__ ulong plainToIndex(const char* plain, size_t plainLength, const char* charSet, size_t charSetSize, map<char, size_t>* charIndexMap)
{
ulong index = 0;
int i;
for (i = 0;i<plainLength - 1;i++) {
index += charIndexMap->operator[](plain[i]) & 0x7f;
index <<= 7;
}
index += charIndexMap->operator[](plain[i]) & 0x7f;
return index;
}*/
// Reduction function (paper version): derives a 64-bit chain index from the
// second half of a 32-byte hash, perturbed by the chain position `pos`, with
// each byte mapped through this thread's shared-memory copy of the charset.
//
// PORTABILITY FIX: `unsigned __int16` is an MSVC-only type; hipified code is
// built with gcc/clang toolchains, so it is replaced with standard uint16_t
// (same size and semantics).
__device__ inline ulong hashToIndexPaperVersion(unsigned char* hash, int pos, const uint8_t plainCharSetSize)
{
    unsigned int* hashP = (unsigned int*)hash;
    // Mix `pos` into three of the four words so different chain positions
    // produce different reductions (reduces chain merges).
    unsigned int p0 = *(hashP + 4) ^ pos;
    unsigned int p2 = *(hashP + 5) ^ (pos >> 12);
    unsigned int p4 = *(hashP + 6) ^ (pos >> 24);
    unsigned int p6 = *(hashP + 7);
    char* plainCharSetP = plainCharSet[threadIdx.x];
    // Each 16-bit lane packs two charset-mapped bytes (high byte from the low
    // half-word, low byte from the high half-word of the hash word).
    uint16_t b0 = plainCharSetP[p0 % plainCharSetSize] << 8 | plainCharSetP[(p0 >> 16) % plainCharSetSize];
    uint16_t b1 = plainCharSetP[p2 % plainCharSetSize] << 8 | plainCharSetP[(p2 >> 16) % plainCharSetSize];
    uint16_t b2 = plainCharSetP[p4 % plainCharSetSize] << 8 | plainCharSetP[(p4 >> 16) % plainCharSetSize];
    uint16_t b3 = plainCharSetP[p6 % plainCharSetSize] << 8 | plainCharSetP[(p6 >> 16) % plainCharSetSize];
    // Assemble the four 16-bit pieces into one 64-bit index, b0 most significant.
    ulong index = 0;
    index += b0;
    index <<= 16;
    index += b1;
    index <<= 16;
    index += b2;
    index <<= 16;
    index += b3;
    return index;
}
// Reduction function variant that skips the charset lookup: each byte of the
// resulting index is a raw residue modulo plainCharSetSize. Otherwise the
// same word-mixing scheme as hashToIndexPaperVersion.
//
// PORTABILITY FIX: `unsigned __int16` (MSVC-only) replaced with standard
// uint16_t so the hipified source builds under gcc/clang.
__device__ inline ulong hashToIndexWithoutCharSet(unsigned char* hash, int pos, const uint8_t plainCharSetSize)
{
    unsigned int* hashP = (unsigned int*)hash;
    // Perturb three of the four hash words with the chain position.
    unsigned int p0 = *(hashP + 4) ^ pos;
    unsigned int p2 = *(hashP + 5) ^ (pos >> 12);
    unsigned int p4 = *(hashP + 6) ^ (pos >> 24);
    unsigned int p6 = *(hashP + 7);
    uint16_t b0 = ((p0 % plainCharSetSize) << 8) | ((p0 >> 16) % plainCharSetSize);
    uint16_t b1 = ((p2 % plainCharSetSize) << 8) | ((p2 >> 16) % plainCharSetSize);
    uint16_t b2 = ((p4 % plainCharSetSize) << 8) | ((p4 >> 16) % plainCharSetSize);
    uint16_t b3 = ((p6 % plainCharSetSize) << 8) | ((p6 >> 16) % plainCharSetSize);
    // Pack the four 16-bit pieces into one 64-bit index, b0 most significant.
    ulong index = 0;
    index += b0;
    index <<= 16;
    index += b1;
    index <<= 16;
    index += b2;
    index <<= 16;
    index += b3;
    return index;
}
// SHA-256 of a short message, keeping the working state in registers.
// Hashes plain[0..length) into the 32-byte buffer `res`.
// Single-block implementation: the message plus 0x80 pad plus the 8-byte bit
// length must fit in one 64-byte block, so this assumes length < 56 — TODO
// confirm callers never pass longer inputs (the padding loop would overrun
// `data` otherwise).
// ROUNDa / ROUND16..ROUND31 and DBL_INT_ADD are macros defined in round.cuh /
// utils.h (not visible in this file); presumably ROUNDa consumes raw message
// words and ROUND16..31 expand the schedule in a rolling 16-word window —
// verify against round.cuh.
__device__ inline void plainToHashWithInlinePTX(const char* plain, const uint8_t length, unsigned char* res) {
unsigned int bitlen0 = 0;
unsigned int bitlen1 = 0;
unsigned int stateP[8];
//unsigned int* stateP = state[threadIdx.x];
unsigned char data[64];
unsigned int l;
// Copy the message into the 64-byte block buffer.
for (l = 0; l < length; ++l) {
data[l] = plain[l];
}
// SHA-256 initial hash values (FIPS 180-4).
stateP[0] = 0x6a09e667;
stateP[1] = 0xbb67ae85;
stateP[2] = 0x3c6ef372;
stateP[3] = 0xa54ff53a;
stateP[4] = 0x510e527f;
stateP[5] = 0x9b05688c;
stateP[6] = 0x1f83d9ab;
stateP[7] = 0x5be0cd19;
// Mandatory 0x80 terminator byte, then zero padding up to the length field.
data[l++] = 0x80;
while (l < 56)
data[l++] = 0x00;
//// Append to the padding the total message's length in bits and transform.
DBL_INT_ADD(bitlen0, bitlen1, length * 8);
// Message length in bits, stored big-endian in bytes 56..63.
data[63] = bitlen0;
data[62] = bitlen0 >> 8;
data[61] = bitlen0 >> 16;
data[60] = bitlen0 >> 24;
data[59] = bitlen1;
data[58] = bitlen1 >> 8;
data[57] = bitlen1 >> 16;
data[56] = bitlen1 >> 24;
// Rolling 16-word message schedule window; a..h are the working variables.
uint32_t schedule[16];
uint32_t a = stateP[0];
uint32_t b = stateP[1];
uint32_t c = stateP[2];
uint32_t d = stateP[3];
uint32_t e = stateP[4];
uint32_t f = stateP[5];
uint32_t g = stateP[6];
uint32_t h = stateP[7];
// 64 compression rounds. The round constants are the standard SHA-256 K[i]
// values written as signed 32-bit literals (hence the negative entries).
// The a..h arguments rotate one position per round instead of shuffling
// variables.
ROUNDa(0, a, b, c, d, e, f, g, h, 0x428A2F98)
ROUNDa(1, h, a, b, c, d, e, f, g, 0x71374491)
ROUNDa(2, g, h, a, b, c, d, e, f, -0x4A3F0431)
ROUNDa(3, f, g, h, a, b, c, d, e, -0x164A245B)
ROUNDa(4, e, f, g, h, a, b, c, d, 0x3956C25B)
ROUNDa(5, d, e, f, g, h, a, b, c, 0x59F111F1)
ROUNDa(6, c, d, e, f, g, h, a, b, -0x6DC07D5C)
ROUNDa(7, b, c, d, e, f, g, h, a, -0x54E3A12B)
ROUNDa(8, a, b, c, d, e, f, g, h, -0x27F85568)
ROUNDa(9, h, a, b, c, d, e, f, g, 0x12835B01)
ROUNDa(10, g, h, a, b, c, d, e, f, 0x243185BE)
ROUNDa(11, f, g, h, a, b, c, d, e, 0x550C7DC3)
ROUNDa(12, e, f, g, h, a, b, c, d, 0x72BE5D74)
ROUNDa(13, d, e, f, g, h, a, b, c, -0x7F214E02)
ROUNDa(14, c, d, e, f, g, h, a, b, -0x6423F959)
ROUNDa(15, b, c, d, e, f, g, h, a, -0x3E640E8C)
ROUND16(16, a, b, c, d, e, f, g, h, -0x1B64963F)
ROUND17(17, h, a, b, c, d, e, f, g, -0x1041B87A)
ROUND18(18, g, h, a, b, c, d, e, f, 0x0FC19DC6)
ROUND19(19, f, g, h, a, b, c, d, e, 0x240CA1CC)
ROUND20(20, e, f, g, h, a, b, c, d, 0x2DE92C6F)
ROUND21(21, d, e, f, g, h, a, b, c, 0x4A7484AA)
ROUND22(22, c, d, e, f, g, h, a, b, 0x5CB0A9DC)
ROUND23(23, b, c, d, e, f, g, h, a, 0x76F988DA)
ROUND24(24, a, b, c, d, e, f, g, h, -0x67C1AEAE)
ROUND25(25, h, a, b, c, d, e, f, g, -0x57CE3993)
ROUND26(26, g, h, a, b, c, d, e, f, -0x4FFCD838)
ROUND27(27, f, g, h, a, b, c, d, e, -0x40A68039)
ROUND28(28, e, f, g, h, a, b, c, d, -0x391FF40D)
ROUND29(29, d, e, f, g, h, a, b, c, -0x2A586EB9)
ROUND30(30, c, d, e, f, g, h, a, b, 0x06CA6351)
ROUND31(31, b, c, d, e, f, g, h, a, 0x14292967)
ROUND16(32, a, b, c, d, e, f, g, h, 0x27B70A85)
ROUND17(33, h, a, b, c, d, e, f, g, 0x2E1B2138)
ROUND18(34, g, h, a, b, c, d, e, f, 0x4D2C6DFC)
ROUND19(35, f, g, h, a, b, c, d, e, 0x53380D13)
ROUND20(36, e, f, g, h, a, b, c, d, 0x650A7354)
ROUND21(37, d, e, f, g, h, a, b, c, 0x766A0ABB)
ROUND22(38, c, d, e, f, g, h, a, b, -0x7E3D36D2)
ROUND23(39, b, c, d, e, f, g, h, a, -0x6D8DD37B)
ROUND24(40, a, b, c, d, e, f, g, h, -0x5D40175F)
ROUND25(41, h, a, b, c, d, e, f, g, -0x57E599B5)
ROUND26(42, g, h, a, b, c, d, e, f, -0x3DB47490)
ROUND27(43, f, g, h, a, b, c, d, e, -0x3893AE5D)
ROUND28(44, e, f, g, h, a, b, c, d, -0x2E6D17E7)
ROUND29(45, d, e, f, g, h, a, b, c, -0x2966F9DC)
ROUND30(46, c, d, e, f, g, h, a, b, -0x0BF1CA7B)
ROUND31(47, b, c, d, e, f, g, h, a, 0x106AA070)
ROUND16(48, a, b, c, d, e, f, g, h, 0x19A4C116)
ROUND17(49, h, a, b, c, d, e, f, g, 0x1E376C08)
ROUND18(50, g, h, a, b, c, d, e, f, 0x2748774C)
ROUND19(51, f, g, h, a, b, c, d, e, 0x34B0BCB5)
ROUND20(52, e, f, g, h, a, b, c, d, 0x391C0CB3)
ROUND21(53, d, e, f, g, h, a, b, c, 0x4ED8AA4A)
ROUND22(54, c, d, e, f, g, h, a, b, 0x5B9CCA4F)
ROUND23(55, b, c, d, e, f, g, h, a, 0x682E6FF3)
ROUND24(56, a, b, c, d, e, f, g, h, 0x748F82EE)
ROUND25(57, h, a, b, c, d, e, f, g, 0x78A5636F)
ROUND26(58, g, h, a, b, c, d, e, f, -0x7B3787EC)
ROUND27(59, f, g, h, a, b, c, d, e, -0x7338FDF8)
ROUND28(60, e, f, g, h, a, b, c, d, -0x6F410006)
ROUND29(61, d, e, f, g, h, a, b, c, -0x5BAF9315)
ROUND30(62, c, d, e, f, g, h, a, b, -0x41065C09)
ROUND31(63, b, c, d, e, f, g, h, a, -0x398E870E)
// Fold the compressed chunk back into the running state.
stateP[0] += a;
stateP[1] += b;
stateP[2] += c;
stateP[3] += d;
stateP[4] += e;
stateP[5] += f;
stateP[6] += g;
stateP[7] += h;
// Since this implementation uses little endian byte ordering and SHA uses big endian,
// reverse all the bytes when copying the final state to the output hash.
// Iteration i writes byte i of each of the 8 output words (res advances by 1,
// word slots are 4 bytes apart).
for (int i = 0; i < 4; ++i) {
l = i << 3;
*(res) = (stateP[0] >> (24 - l)) & 0x000000ff;
*(res + 4) = (stateP[1] >> (24 - l)) & 0x000000ff;
*(res + 8) = (stateP[2] >> (24 - l)) & 0x000000ff;
*(res + 12) = (stateP[3] >> (24 - l)) & 0x000000ff;
*(res + 16) = (stateP[4] >> (24 - l)) & 0x000000ff;
*(res + 20) = (stateP[5] >> (24 - l)) & 0x000000ff;
*(res + 24) = (stateP[6] >> (24 - l)) & 0x000000ff;
*(res + 28) = (stateP[7] >> (24 - l)) & 0x000000ff;
++res;
}
// (A large block of dead, commented-out byte-packing experiments that
// previously lived here was removed for readability.)
}
// SHA-256 of a plaintext derived directly from a 64-bit chain index: each
// 7-bit slice of `index` is reduced modulo `charSetSize` and offset by 32,
// so every message byte is a printable-ASCII code (32..32+charSetSize-1).
// Writes the 32-byte digest into `res`. Assumes length >= 1 and length < 56
// (single 64-byte block; the padding loop would overrun `data` otherwise) —
// TODO confirm at call sites.
// ROUNDa / ROUND16..ROUND31 and DBL_INT_ADD are macros from round.cuh /
// utils.h (not visible in this file).
__device__ inline void plainToHashWithInlinePTX(ulong index, const uint8_t length, unsigned char* res, const uint8_t charSetSize) {
unsigned int bitlen0 = 0;
unsigned int bitlen1 = 0;
unsigned int stateP[8];
unsigned char data[64];
unsigned int l;
// reduct the index in the plain space
// Positions length-1 .. 1 take successive low 7-bit slices; position 0 is
// handled separately because `l` is unsigned (l >= 0 would never terminate).
for (l = length - 1; l >= 1; l--) {
data[l] = (index & 0x7f) % charSetSize + 32;
index >>= 7;
}
data[0] = (index & 0x7f) % charSetSize + 32;
l = length;
// SHA-256 initial hash values (FIPS 180-4).
stateP[0] = 0x6a09e667;
stateP[1] = 0xbb67ae85;
stateP[2] = 0x3c6ef372;
stateP[3] = 0xa54ff53a;
stateP[4] = 0x510e527f;
stateP[5] = 0x9b05688c;
stateP[6] = 0x1f83d9ab;
stateP[7] = 0x5be0cd19;
// Mandatory 0x80 terminator byte, then zero padding up to the length field.
data[l++] = 0x80;
while (l < 56)
data[l++] = 0x00;
//// Append to the padding the total message's length in bits and transform.
DBL_INT_ADD(bitlen0, bitlen1, length * 8);
// Message length in bits, stored big-endian in bytes 56..63.
data[63] = bitlen0;
data[62] = bitlen0 >> 8;
data[61] = bitlen0 >> 16;
data[60] = bitlen0 >> 24;
data[59] = bitlen1;
data[58] = bitlen1 >> 8;
data[57] = bitlen1 >> 16;
data[56] = bitlen1 >> 24;
// Rolling 16-word message schedule window; a..h are the working variables.
uint32_t schedule[16];
uint32_t a = stateP[0];
uint32_t b = stateP[1];
uint32_t c = stateP[2];
uint32_t d = stateP[3];
uint32_t e = stateP[4];
uint32_t f = stateP[5];
uint32_t g = stateP[6];
uint32_t h = stateP[7];
// 64 compression rounds; constants are the standard SHA-256 K[i] values
// written as signed 32-bit literals (hence the negative entries).
ROUNDa(0, a, b, c, d, e, f, g, h, 0x428A2F98)
ROUNDa(1, h, a, b, c, d, e, f, g, 0x71374491)
ROUNDa(2, g, h, a, b, c, d, e, f, -0x4A3F0431)
ROUNDa(3, f, g, h, a, b, c, d, e, -0x164A245B)
ROUNDa(4, e, f, g, h, a, b, c, d, 0x3956C25B)
ROUNDa(5, d, e, f, g, h, a, b, c, 0x59F111F1)
ROUNDa(6, c, d, e, f, g, h, a, b, -0x6DC07D5C)
ROUNDa(7, b, c, d, e, f, g, h, a, -0x54E3A12B)
ROUNDa(8, a, b, c, d, e, f, g, h, -0x27F85568)
ROUNDa(9, h, a, b, c, d, e, f, g, 0x12835B01)
ROUNDa(10, g, h, a, b, c, d, e, f, 0x243185BE)
ROUNDa(11, f, g, h, a, b, c, d, e, 0x550C7DC3)
ROUNDa(12, e, f, g, h, a, b, c, d, 0x72BE5D74)
ROUNDa(13, d, e, f, g, h, a, b, c, -0x7F214E02)
ROUNDa(14, c, d, e, f, g, h, a, b, -0x6423F959)
ROUNDa(15, b, c, d, e, f, g, h, a, -0x3E640E8C)
ROUND16(16, a, b, c, d, e, f, g, h, -0x1B64963F)
ROUND17(17, h, a, b, c, d, e, f, g, -0x1041B87A)
ROUND18(18, g, h, a, b, c, d, e, f, 0x0FC19DC6)
ROUND19(19, f, g, h, a, b, c, d, e, 0x240CA1CC)
ROUND20(20, e, f, g, h, a, b, c, d, 0x2DE92C6F)
ROUND21(21, d, e, f, g, h, a, b, c, 0x4A7484AA)
ROUND22(22, c, d, e, f, g, h, a, b, 0x5CB0A9DC)
ROUND23(23, b, c, d, e, f, g, h, a, 0x76F988DA)
ROUND24(24, a, b, c, d, e, f, g, h, -0x67C1AEAE)
ROUND25(25, h, a, b, c, d, e, f, g, -0x57CE3993)
ROUND26(26, g, h, a, b, c, d, e, f, -0x4FFCD838)
ROUND27(27, f, g, h, a, b, c, d, e, -0x40A68039)
ROUND28(28, e, f, g, h, a, b, c, d, -0x391FF40D)
ROUND29(29, d, e, f, g, h, a, b, c, -0x2A586EB9)
ROUND30(30, c, d, e, f, g, h, a, b, 0x06CA6351)
ROUND31(31, b, c, d, e, f, g, h, a, 0x14292967)
ROUND16(32, a, b, c, d, e, f, g, h, 0x27B70A85)
ROUND17(33, h, a, b, c, d, e, f, g, 0x2E1B2138)
ROUND18(34, g, h, a, b, c, d, e, f, 0x4D2C6DFC)
ROUND19(35, f, g, h, a, b, c, d, e, 0x53380D13)
ROUND20(36, e, f, g, h, a, b, c, d, 0x650A7354)
ROUND21(37, d, e, f, g, h, a, b, c, 0x766A0ABB)
ROUND22(38, c, d, e, f, g, h, a, b, -0x7E3D36D2)
ROUND23(39, b, c, d, e, f, g, h, a, -0x6D8DD37B)
ROUND24(40, a, b, c, d, e, f, g, h, -0x5D40175F)
ROUND25(41, h, a, b, c, d, e, f, g, -0x57E599B5)
ROUND26(42, g, h, a, b, c, d, e, f, -0x3DB47490)
ROUND27(43, f, g, h, a, b, c, d, e, -0x3893AE5D)
ROUND28(44, e, f, g, h, a, b, c, d, -0x2E6D17E7)
ROUND29(45, d, e, f, g, h, a, b, c, -0x2966F9DC)
ROUND30(46, c, d, e, f, g, h, a, b, -0x0BF1CA7B)
ROUND31(47, b, c, d, e, f, g, h, a, 0x106AA070)
ROUND16(48, a, b, c, d, e, f, g, h, 0x19A4C116)
ROUND17(49, h, a, b, c, d, e, f, g, 0x1E376C08)
ROUND18(50, g, h, a, b, c, d, e, f, 0x2748774C)
ROUND19(51, f, g, h, a, b, c, d, e, 0x34B0BCB5)
ROUND20(52, e, f, g, h, a, b, c, d, 0x391C0CB3)
ROUND21(53, d, e, f, g, h, a, b, c, 0x4ED8AA4A)
ROUND22(54, c, d, e, f, g, h, a, b, 0x5B9CCA4F)
ROUND23(55, b, c, d, e, f, g, h, a, 0x682E6FF3)
ROUND24(56, a, b, c, d, e, f, g, h, 0x748F82EE)
ROUND25(57, h, a, b, c, d, e, f, g, 0x78A5636F)
ROUND26(58, g, h, a, b, c, d, e, f, -0x7B3787EC)
ROUND27(59, f, g, h, a, b, c, d, e, -0x7338FDF8)
ROUND28(60, e, f, g, h, a, b, c, d, -0x6F410006)
ROUND29(61, d, e, f, g, h, a, b, c, -0x5BAF9315)
ROUND30(62, c, d, e, f, g, h, a, b, -0x41065C09)
ROUND31(63, b, c, d, e, f, g, h, a, -0x398E870E)
// Fold the compressed chunk back into the running state.
stateP[0] += a;
stateP[1] += b;
stateP[2] += c;
stateP[3] += d;
stateP[4] += e;
stateP[5] += f;
stateP[6] += g;
stateP[7] += h;
// Since this implementation uses little endian byte ordering and SHA uses big endian,
// reverse all the bytes when copying the final state to the output hash.
// Iteration i writes byte i of each of the 8 output words.
for (int i = 0; i < 4; ++i) {
l = i << 3;
*(res) = (stateP[0] >> (24 - l)) & 0x000000ff;
*(res + 4) = (stateP[1] >> (24 - l)) & 0x000000ff;
*(res + 8) = (stateP[2] >> (24 - l)) & 0x000000ff;
*(res + 12) = (stateP[3] >> (24 - l)) & 0x000000ff;
*(res + 16) = (stateP[4] >> (24 - l)) & 0x000000ff;
*(res + 20) = (stateP[5] >> (24 - l)) & 0x000000ff;
*(res + 24) = (stateP[6] >> (24 - l)) & 0x000000ff;
*(res + 28) = (stateP[7] >> (24 - l)) & 0x000000ff;
++res;
}
}
// SHA-256 of plain[0..length) into the 32-byte buffer `res`, with the 8-word
// hash state held in this thread's row of the file-scope __shared__ `state`
// array and the full 64-word message schedule precomputed up front (unlike
// the register-resident plainToHashWithInlinePTX variants).
// Assumes length < 56 (single 64-byte block) — TODO confirm at call sites.
// NOTE(review): each thread touches only state[threadIdx.x], so there is no
// inter-thread race, but file-scope __shared__ declarations are non-standard
// in CUDA/HIP — verify this compiles as intended on the target toolchain.
// LOADSCHEDULE/SCHEDULE/ROUND are the macros defined at the top of this file;
// DBL_INT_ADD comes from utils.h (not visible here).
__device__ inline void plainToHash(char* plain, const uint8_t length, unsigned char* res)
{
unsigned int bitlen0 = 0;
unsigned int bitlen1 = 0;
//unsigned int stateP[8];
unsigned char data[64];
unsigned int l;
// Copy the message into the 64-byte block buffer.
for (l = 0; l < length; ++l) {
data[l] = plain[l];
}
// Per-thread row of the shared-memory state array.
uint* stateP = state[threadIdx.x];
// SHA-256 initial hash values (FIPS 180-4).
stateP[0] = 0x6a09e667;
stateP[1] = 0xbb67ae85;
stateP[2] = 0x3c6ef372;
stateP[3] = 0xa54ff53a;
stateP[4] = 0x510e527f;
stateP[5] = 0x9b05688c;
stateP[6] = 0x1f83d9ab;
stateP[7] = 0x5be0cd19;
// Pad whatever data is left in the buffer.
data[l++] = 0x80;
while (l < 56)
data[l++] = 0x00;
// Append to the padding the total message's length in bits and transform.
DBL_INT_ADD(bitlen0, bitlen1, length * 8);
// Message length in bits, stored big-endian in bytes 56..63.
data[63] = bitlen0;
data[62] = bitlen0 >> 8;
data[61] = bitlen0 >> 16;
data[60] = bitlen0 >> 24;
data[59] = bitlen1;
data[58] = bitlen1 >> 8;
data[57] = bitlen1 >> 16;
data[56] = bitlen1 >> 24;
// Full 64-word message schedule: words 0-15 are the big-endian message
// words, words 16-63 are derived by the SHA-256 expansion (SCHEDULE macro).
uint32_t schedule[64];
LOADSCHEDULE(0)
LOADSCHEDULE(1)
LOADSCHEDULE(2)
LOADSCHEDULE(3)
LOADSCHEDULE(4)
LOADSCHEDULE(5)
LOADSCHEDULE(6)
LOADSCHEDULE(7)
LOADSCHEDULE(8)
LOADSCHEDULE(9)
LOADSCHEDULE(10)
LOADSCHEDULE(11)
LOADSCHEDULE(12)
LOADSCHEDULE(13)
LOADSCHEDULE(14)
LOADSCHEDULE(15)
SCHEDULE(16)
SCHEDULE(17)
SCHEDULE(18)
SCHEDULE(19)
SCHEDULE(20)
SCHEDULE(21)
SCHEDULE(22)
SCHEDULE(23)
SCHEDULE(24)
SCHEDULE(25)
SCHEDULE(26)
SCHEDULE(27)
SCHEDULE(28)
SCHEDULE(29)
SCHEDULE(30)
SCHEDULE(31)
SCHEDULE(32)
SCHEDULE(33)
SCHEDULE(34)
SCHEDULE(35)
SCHEDULE(36)
SCHEDULE(37)
SCHEDULE(38)
SCHEDULE(39)
SCHEDULE(40)
SCHEDULE(41)
SCHEDULE(42)
SCHEDULE(43)
SCHEDULE(44)
SCHEDULE(45)
SCHEDULE(46)
SCHEDULE(47)
SCHEDULE(48)
SCHEDULE(49)
SCHEDULE(50)
SCHEDULE(51)
SCHEDULE(52)
SCHEDULE(53)
SCHEDULE(54)
SCHEDULE(55)
SCHEDULE(56)
SCHEDULE(57)
SCHEDULE(58)
SCHEDULE(59)
SCHEDULE(60)
SCHEDULE(61)
SCHEDULE(62)
SCHEDULE(63)
// Working variables a..h; the ROUND macro rotates them one position per round.
uint32_t a = stateP[0];
uint32_t b = stateP[1];
uint32_t c = stateP[2];
uint32_t d = stateP[3];
uint32_t e = stateP[4];
uint32_t f = stateP[5];
uint32_t g = stateP[6];
uint32_t h = stateP[7];
// 64 compression rounds with the standard SHA-256 round constants K[0..63].
ROUND(a, b, c, d, e, f, g, h, 0, 0x428A2F98)
ROUND(h, a, b, c, d, e, f, g, 1, 0x71374491)
ROUND(g, h, a, b, c, d, e, f, 2, 0xB5C0FBCF)
ROUND(f, g, h, a, b, c, d, e, 3, 0xE9B5DBA5)
ROUND(e, f, g, h, a, b, c, d, 4, 0x3956C25B)
ROUND(d, e, f, g, h, a, b, c, 5, 0x59F111F1)
ROUND(c, d, e, f, g, h, a, b, 6, 0x923F82A4)
ROUND(b, c, d, e, f, g, h, a, 7, 0xAB1C5ED5)
ROUND(a, b, c, d, e, f, g, h, 8, 0xD807AA98)
ROUND(h, a, b, c, d, e, f, g, 9, 0x12835B01)
ROUND(g, h, a, b, c, d, e, f, 10, 0x243185BE)
ROUND(f, g, h, a, b, c, d, e, 11, 0x550C7DC3)
ROUND(e, f, g, h, a, b, c, d, 12, 0x72BE5D74)
ROUND(d, e, f, g, h, a, b, c, 13, 0x80DEB1FE)
ROUND(c, d, e, f, g, h, a, b, 14, 0x9BDC06A7)
ROUND(b, c, d, e, f, g, h, a, 15, 0xC19BF174)
ROUND(a, b, c, d, e, f, g, h, 16, 0xE49B69C1)
ROUND(h, a, b, c, d, e, f, g, 17, 0xEFBE4786)
ROUND(g, h, a, b, c, d, e, f, 18, 0x0FC19DC6)
ROUND(f, g, h, a, b, c, d, e, 19, 0x240CA1CC)
ROUND(e, f, g, h, a, b, c, d, 20, 0x2DE92C6F)
ROUND(d, e, f, g, h, a, b, c, 21, 0x4A7484AA)
ROUND(c, d, e, f, g, h, a, b, 22, 0x5CB0A9DC)
ROUND(b, c, d, e, f, g, h, a, 23, 0x76F988DA)
ROUND(a, b, c, d, e, f, g, h, 24, 0x983E5152)
ROUND(h, a, b, c, d, e, f, g, 25, 0xA831C66D)
ROUND(g, h, a, b, c, d, e, f, 26, 0xB00327C8)
ROUND(f, g, h, a, b, c, d, e, 27, 0xBF597FC7)
ROUND(e, f, g, h, a, b, c, d, 28, 0xC6E00BF3)
ROUND(d, e, f, g, h, a, b, c, 29, 0xD5A79147)
ROUND(c, d, e, f, g, h, a, b, 30, 0x06CA6351)
ROUND(b, c, d, e, f, g, h, a, 31, 0x14292967)
ROUND(a, b, c, d, e, f, g, h, 32, 0x27B70A85)
ROUND(h, a, b, c, d, e, f, g, 33, 0x2E1B2138)
ROUND(g, h, a, b, c, d, e, f, 34, 0x4D2C6DFC)
ROUND(f, g, h, a, b, c, d, e, 35, 0x53380D13)
ROUND(e, f, g, h, a, b, c, d, 36, 0x650A7354)
ROUND(d, e, f, g, h, a, b, c, 37, 0x766A0ABB)
ROUND(c, d, e, f, g, h, a, b, 38, 0x81C2C92E)
ROUND(b, c, d, e, f, g, h, a, 39, 0x92722C85)
ROUND(a, b, c, d, e, f, g, h, 40, 0xA2BFE8A1)
ROUND(h, a, b, c, d, e, f, g, 41, 0xA81A664B)
ROUND(g, h, a, b, c, d, e, f, 42, 0xC24B8B70)
ROUND(f, g, h, a, b, c, d, e, 43, 0xC76C51A3)
ROUND(e, f, g, h, a, b, c, d, 44, 0xD192E819)
ROUND(d, e, f, g, h, a, b, c, 45, 0xD6990624)
ROUND(c, d, e, f, g, h, a, b, 46, 0xF40E3585)
ROUND(b, c, d, e, f, g, h, a, 47, 0x106AA070)
ROUND(a, b, c, d, e, f, g, h, 48, 0x19A4C116)
ROUND(h, a, b, c, d, e, f, g, 49, 0x1E376C08)
ROUND(g, h, a, b, c, d, e, f, 50, 0x2748774C)
ROUND(f, g, h, a, b, c, d, e, 51, 0x34B0BCB5)
ROUND(e, f, g, h, a, b, c, d, 52, 0x391C0CB3)
ROUND(d, e, f, g, h, a, b, c, 53, 0x4ED8AA4A)
ROUND(c, d, e, f, g, h, a, b, 54, 0x5B9CCA4F)
ROUND(b, c, d, e, f, g, h, a, 55, 0x682E6FF3)
ROUND(a, b, c, d, e, f, g, h, 56, 0x748F82EE)
ROUND(h, a, b, c, d, e, f, g, 57, 0x78A5636F)
ROUND(g, h, a, b, c, d, e, f, 58, 0x84C87814)
ROUND(f, g, h, a, b, c, d, e, 59, 0x8CC70208)
ROUND(e, f, g, h, a, b, c, d, 60, 0x90BEFFFA)
ROUND(d, e, f, g, h, a, b, c, 61, 0xA4506CEB)
ROUND(c, d, e, f, g, h, a, b, 62, 0xBEF9A3F7)
ROUND(b, c, d, e, f, g, h, a, 63, 0xC67178F2)
// Fold the compressed chunk back into the running state.
stateP[0] += a;
stateP[1] += b;
stateP[2] += c;
stateP[3] += d;
stateP[4] += e;
stateP[5] += f;
stateP[6] += g;
stateP[7] += h;
// Since this implementation uses little endian byte ordering and SHA uses big endian,
// reverse all the bytes when copying the final state to the output hash.
// Iteration i writes byte i of each of the 8 output words.
for (int i = 0; i < 4; ++i) {
l = i << 3;
*(res) = (stateP[0] >> (24 - l)) & 0x000000ff;
*(res + 4) = (stateP[1] >> (24 - l)) & 0x000000ff;
*(res + 8) = (stateP[2] >> (24 - l)) & 0x000000ff;
*(res + 12) = (stateP[3] >> (24 - l)) & 0x000000ff;
*(res + 16) = (stateP[4] >> (24 - l)) & 0x000000ff;
*(res + 20) = (stateP[5] >> (24 - l)) & 0x000000ff;
*(res + 24) = (stateP[6] >> (24 - l)) & 0x000000ff;
*(res + 28) = (stateP[7] >> (24 - l)) & 0x000000ff;
++res;
}
}
// Copies the charset from __constant__ memory into this thread's private
// row of the shared-memory charset table. Each thread writes only its own
// row (plainCharSet[threadIdx.x]), so no synchronization is needed here.
__device__ inline void initSHA256ConstantAndCharSet(const unsigned int charSetSize)
{
    char* dst = plainCharSet[threadIdx.x];
    for (unsigned int i = 0; i < charSetSize; ++i) {
        dst[i] = constantAreaPlainCharSet[i];
    }
}
// Reduces a hash to a chain index within [0, plainSpace): adds the chain
// position to the first 64-bit word of the hash and wraps it modulo the
// plaintext-space size.
__device__ inline ulong hashToIndex(unsigned char* hash, int pos, ulong plainSpace)
{
    const ulong firstWord = *(ulong*)hash;
    return (firstWord + pos) % plainSpace;
}
// Reduces a 32-byte hash to a 64-bit chain index by XOR-folding its four
// 64-bit words and adding the chain position.
__device__ ulong hashToIndex(unsigned char* hash, int pos)
{
    const ulong* words = (const ulong*)hash;
    ulong folded = words[0] ^ words[1] ^ words[2] ^ words[3];
    return folded + pos;
}
// Re-encodes `index` so that every 7-bit digit lands in the printable-ASCII
// range 32..(32+plainCharSize-1); the result packs the digits back into a
// single 64-bit value, first digit most significant.
__device__ inline ulong reductFinalIndex(ulong index, uint8_t plainLength, uint8_t plainCharSize)
{
    uint8_t digits[9];
    // Extract the digits from the low bits, storing them front-to-back.
    for (int pos = plainLength - 1; pos >= 0; pos--) {
        // 32 - 126: clamp each 7-bit slice into the printable charset range.
        digits[pos] = ((uint8_t)(index & 0x7f)) % plainCharSize + 32;
        index >>= 7;
    }
    // Repack: shift in every digit but the last, then add the last one.
    ulong packed = 0;
    int d = 0;
    while (d < plainLength - 1) {
        packed += digits[d];
        packed <<= 7;
        ++d;
    }
    packed += digits[d];
    return packed;
}
// Kernel: each thread walks one rainbow chain of `chainLength` steps, using
// the XOR-fold reduction (hashToIndex(hash, i)), starting from its chain's
// indexS and storing the final reduced index into indexE.
// Cleanup: removed the unused local `char plain[8]` and the dead commented
// code path (that variant lives in the 5-argument overload below).
// NOTE(review): there is no `offset < N` bounds check; the launch must supply
// exactly gridDim.x * blockDim.x chains — confirm at the (unseen) call sites.
__global__ void generateChainPaperVersion(struct Chain* chains, const uint8_t plainCharSetSize,
const uint8_t plainLength, const unsigned int chainLength)
{
    unsigned char hash[32];
    uint offset = (blockIdx.x * blockDim.x) + threadIdx.x;
    struct Chain* chain = chains + offset;
    ulong indexE = chain->indexS;
    for (int i = 0; i < chainLength; i++) {
        // Hash the plaintext derived from the current index, then reduce the
        // digest back to an index, mixing in the step number i so different
        // chain positions use different reductions.
        plainToHashWithInlinePTX(indexE, plainLength, hash, plainCharSetSize);
        indexE = hashToIndex(hash, i);
    }
    // Clamp the final index into the printable-charset space before storing.
    chain->indexE = reductFinalIndex(indexE, plainLength, plainCharSetSize);
}
// Kernel: each thread walks one rainbow chain of `chainLength` steps using
// the modulo-`plainSpace` reduction, starting from its chain's indexS and
// storing the final index into indexE.
// (A dead, commented-out variant that used the XOR-fold reduction was
// removed; it is implemented by the 4-argument overload above.)
__global__ void generateChainPaperVersion(struct Chain* chains, const uint8_t plainCharSetSize,
const uint8_t plainLength, const unsigned int chainLength, ulong plainSpace)
{
    unsigned char digest[32];
    char password[8];
    uint tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    struct Chain* myChain = &chains[tid];
    ulong idx = myChain->indexS;
    for (int step = 0; step < chainLength; step++) {
        // index -> plaintext digits -> hash -> next index (step mixed in).
        indexToPlain(idx, plainLength, plainCharSetSize, password);
        plainToHashWithInlinePTX(password, plainLength, digest);
        idx = hashToIndex(digest, step, plainSpace);
    }
    myChain->indexE = idx;
}
// Kernel for the exhaustive 1-3 character table: thread `offset` hashes the
// pre-filled plaintext of chains[offset]. Offsets in [0, S) are length-1
// passwords, [S, S+S^2) length-2, [S+S^2, S+S^2+S^3) length-3, where
// S = plainCharSetSize; anything beyond is out of range and does nothing.
__global__ void generateChain(struct PasswordMapping* chains, const uint8_t plainCharSetSize)
{
    uint32_t offset = (blockIdx.x * blockDim.x) + threadIdx.x;
    const uint32_t s = plainCharSetSize;
    const uint32_t upTo1 = s;
    const uint32_t upTo2 = upTo1 + s * s;
    const uint32_t upTo3 = upTo2 + s * s * s;
    uint8_t passwordLength = 0;
    if (offset < upTo1) {
        passwordLength = 1;
    } else if (offset < upTo2) {
        passwordLength = 2;
    } else if (offset < upTo3) {
        passwordLength = 3;
    }
    if (passwordLength != 0) {
        struct PasswordMapping* entry = chains + offset;
        plainToHash(entry->plain, passwordLength, entry->hash);
    }
}
void generateTableWhilePasswordLengthLowerOrEqualThan3(const char* hostCharSetPath , const uint8_t plainCharSetSize)
{
const uint32_t CHAINS_SIZE = plainCharSetSize + plainCharSetSize * plainCharSetSize + plainCharSetSize * plainCharSetSize * plainCharSetSize;
struct PasswordMapping* deviceChains;
struct PasswordMapping* hostChains;
char* hostCharSet;
CUDA_CALL(hipHostMalloc(&hostChains, CHAINS_SIZE * sizeof(struct PasswordMapping), hipHostMallocDefault));
CUDA_CALL(hipHostMalloc(&hostCharSet, plainCharSetSize * sizeof(char), hipHostMallocDefault));
getCharSet(hostCharSet, hostCharSetPath, plainCharSetSize);
generateInitialIndex(hostChains, hostCharSet, plainCharSetSize);
CUDA_CALL(hipMalloc(&deviceChains, CHAINS_SIZE * sizeof(struct PasswordMapping)));
CUDA_CALL(hipMemcpy(deviceChains, hostChains, CHAINS_SIZE * sizeof(struct PasswordMapping), hipMemcpyHostToDevice));
uint32_t threadPerBlock = 384;
uint32_t blockNum = CHAINS_SIZE / threadPerBlock + 1;
hipEvent_t startEvent;
hipEvent_t endEvent;
float cudaElapsedTime = 0.0f;
hipEventCreate(&startEvent);
hipEventCreate(&endEvent);
hipEventRecord(startEvent, 0);
hipLaunchKernelGGL(( generateChain), dim3(blockNum), dim3(threadPerBlock), 0, 0, deviceChains, plainCharSetSize);
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&cudaElapsedTime, startEvent, endEvent);
QSort(deviceChains, CHAINS_SIZE);
CUDA_CALL(hipMemcpy(hostChains, deviceChains, CHAINS_SIZE * sizeof(struct PasswordMapping), hipMemcpyDeviceToHost));
writeToFile((string("../") + "1-3#" + "ascii-32-95#" + "1").c_str(), hostChains, sizeof(struct PasswordMapping), CHAINS_SIZE);
hipHostFree(hostChains);
hipHostFree(hostCharSet);
hipFree(deviceChains);
//hipEventDestroy(startEvent);
//hipEventDestroy(endEvent);
hipDeviceReset();
printf("%.3lf MH/S", (CHAINS_SIZE) / (cudaElapsedTime * 1000.0));
}
//int main()
//{
// generateTableWhilePasswordLengthLowerOrEqualThan3("../charsets/ascii-32-95.txt", 95);
//
// //constexpr uint32_t CHAINS_SIZE = 95 + 95 * 95 + 95 * 95 * 95;
// //struct PasswordMapping* mappings;
// //hipHostMalloc(&mappings, sizeof(struct PasswordMapping) * CHAINS_SIZE, hipHostMallocDefault);
// //openTableFile((string("../") + "1-3#" + "ascii-32-95#" + "1").c_str(), mappings, sizeof(struct PasswordMapping), CHAINS_SIZE);
// //for (int i = 0;i < CHAINS_SIZE;i++) {
// // printf("%s\n", mappings[i].hash);
// //}
// //getchar();
//
// return 0;
//}
// Builds one rainbow table for passwords of exactly `plainLength` characters
// (supported: 4, 5, 6) drawn from the charset file: generates chains on the
// GPU, sorts them by end index, removes duplicate chain ends and writes the
// surviving chains to disk.
// Fixes vs. previous revision: unsupported plainLength is rejected instead of
// silently launching a 0-block grid; the `forWrite` pinned buffer and both hip
// events are now released (they were leaked); the unused `plainSpace` local is
// gone; launch errors are checked.
void generateTable(const uint8_t plainLength, const char* hostCharSetPath, const uint8_t plainCharSetSize)
{
	// chainSize = blockNum * threadPerBlock
	// cover = chainSize * chainLength
	const uint32_t threadPerBlock = 384;
	uint32_t blockNum = 0;
	uint32_t CHAINS_SIZE = 0;
	uint32_t chainLength = 0;
	// Tuning strategy (assumes plainCharSetSize == 95): longer passwords get
	// longer chains and more of them. The collision ratio is quite high for
	// short plains, so the paper's redundant reduction is used per-position.
	switch (plainLength) {
	case 4:
		chainLength = 350;
		CHAINS_SIZE = 384000;
		blockNum = 1000;
		break;
	case 5:
		chainLength = 3600;
		CHAINS_SIZE = 2304000;
		blockNum = 6000;
		break;
	case 6:
		chainLength = 60000;
		CHAINS_SIZE = 15360000;
		blockNum = 40000;
		break;
	default:
		// Previously fell through with every size left at 0, which launched an
		// empty grid and wrote an empty table. Fail loudly instead.
		printf("generateTable: unsupported plainLength %u (expected 4, 5 or 6)\n", (unsigned)plainLength);
		return;
	}
	struct Chain* devicePointer;
	struct Chain* hostPointer;
	char* hostCharSet;
	char* deviceCharSet;
	CUDA_CALL(hipHostMalloc(&hostPointer, CHAINS_SIZE * sizeof(struct Chain), hipHostMallocDefault));
	CUDA_CALL(hipHostMalloc(&hostCharSet, plainCharSetSize * sizeof(char), hipHostMallocDefault));
	getCharSet(hostCharSet, hostCharSetPath, plainCharSetSize);
	generateInitialIndex(hostPointer, CHAINS_SIZE);
	CUDA_CALL(hipMalloc(&devicePointer, CHAINS_SIZE * sizeof(struct Chain)));
	CUDA_CALL(hipMalloc(&deviceCharSet, plainCharSetSize * sizeof(char)));
	CUDA_CALL(hipMemcpy(deviceCharSet, hostCharSet, plainCharSetSize * sizeof(char), hipMemcpyHostToDevice));
	CUDA_CALL(hipMemcpy(devicePointer, hostPointer, CHAINS_SIZE * sizeof(struct Chain), hipMemcpyHostToDevice));
	hipEvent_t startEvent;
	hipEvent_t endEvent;
	float cudaElapsedTime = 0.0f;
	CUDA_CALL(hipEventCreate(&startEvent));
	CUDA_CALL(hipEventCreate(&endEvent));
	CUDA_CALL(hipEventRecord(startEvent, 0));
	generateChainPaperVersion << <blockNum, threadPerBlock >> > (devicePointer, plainCharSetSize, plainLength, chainLength);
	CUDA_CALL(hipGetLastError()); // surface bad launch configurations immediately
	CUDA_CALL(hipEventRecord(endEvent, 0));
	CUDA_CALL(hipEventSynchronize(endEvent));
	CUDA_CALL(hipEventElapsedTime(&cudaElapsedTime, startEvent, endEvent));
	// Sort chains by end index so duplicate chain ends become adjacent.
	thrust::device_ptr<struct Chain> thrustChainP(devicePointer);
	thrust::sort(thrustChainP, thrustChainP + CHAINS_SIZE, ChainComparator());
	CUDA_CALL(hipMemcpy(hostPointer, devicePointer, CHAINS_SIZE * sizeof(struct Chain), hipMemcpyDeviceToHost));
	struct Chain* forWrite;
	CUDA_CALL(hipHostMalloc(&forWrite, sizeof(struct Chain) * CHAINS_SIZE, hipHostMallocDefault));
	uint32_t actualSize = removeDuplicate(forWrite, hostPointer, CHAINS_SIZE);
	writeToFile(fileNameBuilder("../", plainLength, hostCharSetPath, 1, actualSize, chainLength).c_str(), forWrite, sizeof(struct Chain), actualSize);
	hipHostFree(forWrite); // was leaked before
	hipHostFree(hostPointer);
	hipHostFree(hostCharSet);
	hipFree(deviceCharSet);
	hipFree(devicePointer);
	// These were previously commented out, leaking both events per call.
	hipEventDestroy(startEvent);
	hipEventDestroy(endEvent);
	hipDeviceReset();
	// cudaElapsedTime is in milliseconds: hashes / (ms * 1000) == MH/s.
	printf("%.3lf MH/S", (CHAINS_SIZE * (ulong)chainLength) / (cudaElapsedTime * 1000.0));
}
int main()
{
	// Entry point: build the length-4 rainbow table over the 95-character
	// printable ASCII charset (codes 32-126).
	const uint8_t kPlainLength = 4;
	generateTable(kPlainLength, "../charsets/ascii-32-95.txt", 95);
	return 0;
}
//int main(int argc, char *argv[])
//{
// const uint CHAINS_SIZE = 7680000;
// int plainLength = 4;
// int chainLength = 100000;
//
// int plainCharSetSize = 95;
//
// //hipSetDeviceFlags(hipDeviceMapHost);
// struct Chain* devicePointer;
// struct Chain* hostPointer;
// char* hostCharSet;
// char* deviceCharSet;
// //CUDA_CALL(hipHostMalloc(&hostPointer, CHAINS_SIZE * sizeof(struct Chain), hipHostMallocDefault | hipHostMallocMapped));
// //CUDA_CALL(hipHostMalloc(&hostCharSet, 36 * sizeof(char), hipHostMallocDefault | hipHostMallocMapped));
// CUDA_CALL(hipHostMalloc(&hostPointer, CHAINS_SIZE * sizeof(struct Chain), hipHostMallocDefault));
// CUDA_CALL(hipHostMalloc(&hostCharSet, plainCharSetSize * sizeof(char), hipHostMallocDefault));
//
// getCharSet(hostCharSet, "../charsets/ascii-32-95.txt", plainCharSetSize);
//
// generateInitialIndex(hostPointer, CHAINS_SIZE);
//
// //printf("%llu", hostPointer[0].indexS);
//
// CUDA_CALL(hipMalloc(&devicePointer, CHAINS_SIZE * sizeof(struct Chain)));
// CUDA_CALL(hipMalloc(&deviceCharSet, plainCharSetSize * sizeof(char)));
//
// CUDA_CALL(hipMemcpy(deviceCharSet, hostCharSet, plainCharSetSize * sizeof(char), hipMemcpyHostToDevice));
// CUDA_CALL(hipMemcpy(devicePointer, hostPointer, CHAINS_SIZE * sizeof(struct Chain), hipMemcpyHostToDevice));
//
// CUDA_CALL(hipMemcpyToSymbol(constantAreaPlainCharSet, hostCharSet, sizeof(char) * plainCharSetSize));
//
// /*hiprandGenerator_t randGeneratorDevice;
// const ulong seed = 987654321;
// const hiprandRngType_t generatorType = HIPRAND_RNG_PSEUDO_DEFAULT;
//
// hiprandCreateGenerator(&randGeneratorDevice, generatorType);
// hiprandSetPseudoRandomGeneratorSeed(randGeneratorDevice, seed);
// hiprandGenerateLongLong(randGeneratorDevice, (ulong *)devicePointer, CHAINS_SIZE * 2);*/
//
// int threadPerBlock = 384;
// uint blockNum = CHAINS_SIZE / threadPerBlock;
//
// hipEvent_t startEvent;
// hipEvent_t endEvent;
// float cudaElapsedTime = 0.0f;
// hipEventCreate(&startEvent);
// hipEventCreate(&endEvent);
// hipEventRecord(startEvent, 0);
//
// generateChainPaperVersion << <blockNum, threadPerBlock >> > (devicePointer, plainCharSetSize, plainLength, chainLength);
//
// hipEventRecord(endEvent, 0);
// hipEventSynchronize(endEvent);
// hipEventElapsedTime(&cudaElapsedTime, startEvent, endEvent);
//
// thrust::device_ptr<struct Chain> thrustChainP(devicePointer);
// thrust::sort(thrustChainP, thrustChainP + CHAINS_SIZE, ChainComparator());
//
// CUDA_CALL(hipMemcpy(hostPointer, devicePointer, CHAINS_SIZE * sizeof(struct Chain), hipMemcpyDeviceToHost));
//
// // plainLength#charSet#table#tableLength#chainLength
//
// // 1-3#ascii-32-95#1#0#chainLength
// //writeToFile((string("../") + "1-3#" + "ascii-32-95#" + "1#" + "0#" + ).c_str(), hostPointer, sizeof(struct Chain), CHAINS_SIZE);
// writeToFile("../t5.rt", hostPointer, sizeof(struct Chain), CHAINS_SIZE);
//
//
// hipHostFree(hostPointer);
// hipHostFree(hostCharSet);
// hipFree(deviceCharSet);
// hipFree(devicePointer);
// //hipEventDestroy(startEvent);
// //hipEventDestroy(endEvent);
//
// hipDeviceReset();
//
// printf("%.3lf MH/S", (CHAINS_SIZE * (ulong)chainLength) / (cudaElapsedTime * 1000.0));
//
// getchar();
//
//
//
// return 0;
//}
// ---- file: 6b47828c318e94a288bc58607d06569b17d055d9.cu ----
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include "round.cuh"
#include <cmath>
#include <stdio.h>
#include <curand_kernel.h>
#include "utils.h"
#include <thrust\device_ptr.h>
#include <thrust\sort.h>
#include <map>
#include <string>
#include <sstream>
//#pragma comment(lib, "curand.lib")
using std::map;
using std::string;
using std::stringstream;
#define INDEX_SIZE_IN_BYTES 8
#define EXTRACT_9 0x7fffffffffffffff
#define EXTRACT_8 0x00ffffffffffffff
#define EXTRACT_7 0x0001ffffffffffff
#define EXTRACT_6 0x000003ffffffffff
#define ROTR32(x, n) (((0U + (x)) << (32 - (n))) | ((x) >> (n))) // Assumes that x is uint32_t and 0 < n < 32
#define LOADSCHEDULE(i) \
schedule[i] = (uint32_t)data[i * 4 + 0] << 24 \
| (uint32_t)data[i * 4 + 1] << 16 \
| (uint32_t)data[i * 4 + 2] << 8 \
| (uint32_t)data[i * 4 + 3] << 0;
#define SCHEDULE(i) \
schedule[i] = 0U + schedule[i - 16] + schedule[i - 7] \
+ (ROTR32(schedule[i - 15], 7) ^ ROTR32(schedule[i - 15], 18) ^ (schedule[i - 15] >> 3)) \
+ (ROTR32(schedule[i - 2], 17) ^ ROTR32(schedule[i - 2], 19) ^ (schedule[i - 2] >> 10));
//#define SCHEDULE(i) \
// asm("{\n\t" \
// ".reg .u32 t1;\n\t" \
// ".reg .u32 t2;\n\t" \
// ".reg .u32 t3;\n\t" \
// ".reg .u32 s1;\n\t" \
// ".reg .u32 s2;\n\t" \
// ".reg .u32 s3;\n\t" \
// ".reg .u32 s4;\n\t" \
// "mov.u32 s1, %1;\n\t" \
// "mov.u32 s2, %2;\n\t" \
// "mov.u32 s3, %3;\n\t" \
// "mov.u32 s4, %4;\n\t" \
// "add.u32 t1, s1, s2;\n\t" \
// "shf.r.clamp.b32 t2, s3, s3, 7;\n\t" \
// "shf.r.clamp.b32 t3, s3, s3, 18;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, s3, 3;\n\t" \
// "xor.b32 t2, t2 ,t3;\n\t" \
// "add.u32 t1, t1, t2;\n\t" \
// "shf.r.clamp.b32 t2, s4, s4, 17;\n\t" \
// "shf.r.clamp.b32 t3, s4, s4, 19;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, %4, 10;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "add.u32 t1, t1, t2;\n\t" \
// "mov.u32 %0, t1;\n\t" \
// "}" \
// : "=r"(schedule[i]) : "r"(schedule[i - 16]), "r"(schedule[i - 7]), "r"(schedule[i - 15]), "r"(schedule[i - 2]));
//#define SCHEDULE(i) \
// asm("{\n\t" \
// ".reg .u32 t2;\n\t" \
// ".reg .u32 t3;\n\t" \
// "shf.r.clamp.b32 t2, %3, %3, 7;\n\t" \
// "shf.r.clamp.b32 t3, %3, %3, 18;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, %3, 3;\n\t" \
// "xor.b32 t2, t2 ,t3;\n\t" \
// "add.u32 %0, %0, t2;\n\t" \
// "shf.r.clamp.b32 t2, %4, %4, 17;\n\t" \
// "shf.r.clamp.b32 t3, %4, %4, 19;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "shr.u32 t3, %4, 10;\n\t" \
// "xor.b32 t2, t2, t3;\n\t" \
// "add.u32 %0, %0, t2;\n\t" \
// "add.u32 t2, %1, %2;\n\t" \
// "add.u32 %0, %0, t2;\n\t" \
// "}" \
// : "=r"(schedule[i]) : "r"(schedule[i - 16]), "r"(schedule[i - 7]), "r"(schedule[i - 15]), "r"(schedule[i - 2]));
#define ROUND(a, b, c, d, e, f, g, h, i, k) \
h = 0U + h + (ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25)) + (g ^ (e & (f ^ g))) + UINT32_C(k) + schedule[i]; \
d = 0U + d + h; \
h = 0U + h + (ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22)) + ((a & (b | c)) | (b & c));
#define CUDA_CALL(x) {const cudaError_t a = (x);if(a!=cudaSuccess){printf("\nCUDA Error:%s(err_num=%d)\n",cudaGetErrorString(a),a);}}
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0)
//__shared__ uint k[64];
__constant__ char constantAreaPlainCharSet[36];
__shared__ char plainCharSet[384][95];
__shared__ uint state[384][8];
// Orders chains by their end index so duplicate chain ends become adjacent
// after sorting (used as the comparator for thrust::sort).
struct ChainComparator {
	__host__ __device__
	bool operator()(const struct Chain& a, const struct Chain& b) {
		return a.indexE < b.indexE;
	}
};
// Byte-wise lexicographic "less than" over the 32-byte hash of a
// PasswordMapping, used as the comparator for thrust::sort.
// Fix: the previous implementation only checked for a byte where lhs > rhs and
// otherwise returned true, so two EQUAL digests compared "less" in both
// directions. That violates the strict-weak-ordering contract required by
// thrust::sort and is undefined behavior. This version returns false for equal
// digests and decides at the first differing byte.
struct HashCompartor {
	__host__ __device__
	bool operator()(const struct PasswordMapping& lhs, const struct PasswordMapping& rhs) {
		for (int i = 0; i < 32; i++) {
			if (lhs.hash[i] < rhs.hash[i]) {
				return true;
			}
			if (lhs.hash[i] > rhs.hash[i]) {
				return false;
			}
		}
		// All 32 bytes equal: not "less than".
		return false;
	}
};
// Sorts the device-resident mapping array in place by hash value.
void QSort(struct PasswordMapping* mappings, uint32_t CHAINS_SIZE) {
	thrust::device_ptr<struct PasswordMapping> first = thrust::device_pointer_cast(mappings);
	thrust::sort(first, first + CHAINS_SIZE, HashCompartor());
}
// Decomposes `index` into base-plainCharsetSize digits, most significant
// first. Note: this overload stores the raw digit values, not charset
// characters.
__device__ void indexToPlain(ulong index, const uint8_t plainLength,
	const uint8_t plainCharsetSize, char* plain)
{
	int pos = plainLength;
	while (pos-- > 0) {
		plain[pos] = index % plainCharsetSize;
		index /= plainCharsetSize;
	}
}
// Maps `index` to a plaintext by consuming 7 bits per character position
// (most significant character first) and reducing each slice into the charset.
__device__ inline void indexToPlain(ulong index, char* plain, const uint8_t plainLength, const char* charSet, const unsigned int charSetSize)
{
	int pos = plainLength;
	while (pos-- > 0) {
		plain[pos] = charSet[(index & 0x7f) % charSetSize];
		index >>= 7;
	}
}
/*__device__ ulong plainToIndex(const char* plain, size_t plainLength, const char* charSet, size_t charSetSize, map<char, size_t>* charIndexMap)
{
ulong index = 0;
int i;
for (i = 0;i<plainLength - 1;i++) {
index += charIndexMap->operator[](plain[i]) & 0x7f;
index <<= 7;
}
index += charIndexMap->operator[](plain[i]) & 0x7f;
return index;
}*/
// Paper-style reduction: take the upper 16 digest bytes, perturb the first
// three words with the chain position (so each link uses a distinct reduction
// function), pick eight characters from this thread's shared-memory charset
// row and pack them, two per 16-bit lane, into a single 64-bit index.
__device__ inline ulong hashToIndexPaperVersion(unsigned char* hash, int pos, const uint8_t plainCharSetSize)
{
	unsigned int* words = (unsigned int*)hash;
	unsigned int w0 = *(words + 4) ^ pos;
	unsigned int w1 = *(words + 5) ^ (pos >> 12);
	unsigned int w2 = *(words + 6) ^ (pos >> 24);
	unsigned int w3 = *(words + 7);
	char* cs = plainCharSet[threadIdx.x];
	unsigned __int16 lane0 = cs[w0 % plainCharSetSize] << 8 | cs[(w0 >> 16) % plainCharSetSize];
	unsigned __int16 lane1 = cs[w1 % plainCharSetSize] << 8 | cs[(w1 >> 16) % plainCharSetSize];
	unsigned __int16 lane2 = cs[w2 % plainCharSetSize] << 8 | cs[(w2 >> 16) % plainCharSetSize];
	unsigned __int16 lane3 = cs[w3 % plainCharSetSize] << 8 | cs[(w3 >> 16) % plainCharSetSize];
	// Lanes do not overlap, so OR-ing shifted lanes equals the original
	// add-then-shift accumulation.
	return ((ulong)lane0 << 48) | ((ulong)lane1 << 32) | ((ulong)lane2 << 16) | (ulong)lane3;
}
// Charset-free variant of the paper reduction: reduce the upper 16 digest
// bytes (perturbed by the chain position) to eight values in
// [0, plainCharSetSize) and pack them two per 16-bit lane into a 64-bit index.
__device__ inline ulong hashToIndexWithoutCharSet(unsigned char* hash, int pos, const uint8_t plainCharSetSize)
{
	unsigned int* words = (unsigned int*)hash;
	unsigned int w0 = *(words + 4) ^ pos;
	unsigned int w1 = *(words + 5) ^ (pos >> 12);
	unsigned int w2 = *(words + 6) ^ (pos >> 24);
	unsigned int w3 = *(words + 7);
	unsigned __int16 lane0 = ((w0 % plainCharSetSize) << 8) | ((w0 >> 16) % plainCharSetSize);
	unsigned __int16 lane1 = ((w1 % plainCharSetSize) << 8) | ((w1 >> 16) % plainCharSetSize);
	unsigned __int16 lane2 = ((w2 % plainCharSetSize) << 8) | ((w2 >> 16) % plainCharSetSize);
	unsigned __int16 lane3 = ((w3 % plainCharSetSize) << 8) | ((w3 >> 16) % plainCharSetSize);
	// Lanes do not overlap, so OR-ing shifted lanes equals the original
	// add-then-shift accumulation.
	return ((ulong)lane0 << 48) | ((ulong)lane1 << 32) | ((ulong)lane2 << 16) | (ulong)lane3;
}
// SHA-256 of a short message (single 64-byte block; assumes length < 56 —
// TODO confirm callers never exceed this). Fully unrolled using the
// ROUNDa/ROUND16..ROUND31 macros from round.cuh (presumably the inline-PTX
// round implementations — hence the function name). Writes the 32-byte
// big-endian digest to `res`.
__device__ inline void plainToHashWithInlinePTX(const char* plain, const uint8_t length, unsigned char* res) {
	unsigned int bitlen0 = 0;
	unsigned int bitlen1 = 0;
	// Working state kept in registers (per-thread), not shared memory.
	unsigned int stateP[8];
	//unsigned int* stateP = state[threadIdx.x];
	unsigned char data[64];
	unsigned int l;
	// Copy the message into the 64-byte block buffer.
	for (l = 0; l < length; ++l) {
		data[l] = plain[l];
	}
	// SHA-256 initial hash values H0..H7.
	stateP[0] = 0x6a09e667;
	stateP[1] = 0xbb67ae85;
	stateP[2] = 0x3c6ef372;
	stateP[3] = 0xa54ff53a;
	stateP[4] = 0x510e527f;
	stateP[5] = 0x9b05688c;
	stateP[6] = 0x1f83d9ab;
	stateP[7] = 0x5be0cd19;
	// Standard SHA-256 padding: 0x80 terminator, zero fill up to byte 56.
	data[l++] = 0x80;
	while (l < 56)
		data[l++] = 0x00;
	//// Append to the padding the total message's length in bits and transform.
	// DBL_INT_ADD accumulates the 64-bit bit count across two 32-bit words
	// (macro defined elsewhere — presumably round.cuh/utils.h; verify).
	DBL_INT_ADD(bitlen0, bitlen1, length * 8);
	data[63] = bitlen0;
	data[62] = bitlen0 >> 8;
	data[61] = bitlen0 >> 16;
	data[60] = bitlen0 >> 24;
	data[59] = bitlen1;
	data[58] = bitlen1 >> 8;
	data[57] = bitlen1 >> 16;
	data[56] = bitlen1 >> 24;
	// Only a 16-word rolling schedule: the ROUND16..ROUND31 macros apparently
	// extend the schedule in place (defined in round.cuh — not visible here).
	uint32_t schedule[16];
	uint32_t a = stateP[0];
	uint32_t b = stateP[1];
	uint32_t c = stateP[2];
	uint32_t d = stateP[3];
	uint32_t e = stateP[4];
	uint32_t f = stateP[5];
	uint32_t g = stateP[6];
	uint32_t h = stateP[7];
	// 64 compression rounds; the negative literals are the SHA-256 K constants
	// expressed as signed 32-bit values.
	ROUNDa(0, a, b, c, d, e, f, g, h, 0x428A2F98)
	ROUNDa(1, h, a, b, c, d, e, f, g, 0x71374491)
	ROUNDa(2, g, h, a, b, c, d, e, f, -0x4A3F0431)
	ROUNDa(3, f, g, h, a, b, c, d, e, -0x164A245B)
	ROUNDa(4, e, f, g, h, a, b, c, d, 0x3956C25B)
	ROUNDa(5, d, e, f, g, h, a, b, c, 0x59F111F1)
	ROUNDa(6, c, d, e, f, g, h, a, b, -0x6DC07D5C)
	ROUNDa(7, b, c, d, e, f, g, h, a, -0x54E3A12B)
	ROUNDa(8, a, b, c, d, e, f, g, h, -0x27F85568)
	ROUNDa(9, h, a, b, c, d, e, f, g, 0x12835B01)
	ROUNDa(10, g, h, a, b, c, d, e, f, 0x243185BE)
	ROUNDa(11, f, g, h, a, b, c, d, e, 0x550C7DC3)
	ROUNDa(12, e, f, g, h, a, b, c, d, 0x72BE5D74)
	ROUNDa(13, d, e, f, g, h, a, b, c, -0x7F214E02)
	ROUNDa(14, c, d, e, f, g, h, a, b, -0x6423F959)
	ROUNDa(15, b, c, d, e, f, g, h, a, -0x3E640E8C)
	ROUND16(16, a, b, c, d, e, f, g, h, -0x1B64963F)
	ROUND17(17, h, a, b, c, d, e, f, g, -0x1041B87A)
	ROUND18(18, g, h, a, b, c, d, e, f, 0x0FC19DC6)
	ROUND19(19, f, g, h, a, b, c, d, e, 0x240CA1CC)
	ROUND20(20, e, f, g, h, a, b, c, d, 0x2DE92C6F)
	ROUND21(21, d, e, f, g, h, a, b, c, 0x4A7484AA)
	ROUND22(22, c, d, e, f, g, h, a, b, 0x5CB0A9DC)
	ROUND23(23, b, c, d, e, f, g, h, a, 0x76F988DA)
	ROUND24(24, a, b, c, d, e, f, g, h, -0x67C1AEAE)
	ROUND25(25, h, a, b, c, d, e, f, g, -0x57CE3993)
	ROUND26(26, g, h, a, b, c, d, e, f, -0x4FFCD838)
	ROUND27(27, f, g, h, a, b, c, d, e, -0x40A68039)
	ROUND28(28, e, f, g, h, a, b, c, d, -0x391FF40D)
	ROUND29(29, d, e, f, g, h, a, b, c, -0x2A586EB9)
	ROUND30(30, c, d, e, f, g, h, a, b, 0x06CA6351)
	ROUND31(31, b, c, d, e, f, g, h, a, 0x14292967)
	ROUND16(32, a, b, c, d, e, f, g, h, 0x27B70A85)
	ROUND17(33, h, a, b, c, d, e, f, g, 0x2E1B2138)
	ROUND18(34, g, h, a, b, c, d, e, f, 0x4D2C6DFC)
	ROUND19(35, f, g, h, a, b, c, d, e, 0x53380D13)
	ROUND20(36, e, f, g, h, a, b, c, d, 0x650A7354)
	ROUND21(37, d, e, f, g, h, a, b, c, 0x766A0ABB)
	ROUND22(38, c, d, e, f, g, h, a, b, -0x7E3D36D2)
	ROUND23(39, b, c, d, e, f, g, h, a, -0x6D8DD37B)
	ROUND24(40, a, b, c, d, e, f, g, h, -0x5D40175F)
	ROUND25(41, h, a, b, c, d, e, f, g, -0x57E599B5)
	ROUND26(42, g, h, a, b, c, d, e, f, -0x3DB47490)
	ROUND27(43, f, g, h, a, b, c, d, e, -0x3893AE5D)
	ROUND28(44, e, f, g, h, a, b, c, d, -0x2E6D17E7)
	ROUND29(45, d, e, f, g, h, a, b, c, -0x2966F9DC)
	ROUND30(46, c, d, e, f, g, h, a, b, -0x0BF1CA7B)
	ROUND31(47, b, c, d, e, f, g, h, a, 0x106AA070)
	ROUND16(48, a, b, c, d, e, f, g, h, 0x19A4C116)
	ROUND17(49, h, a, b, c, d, e, f, g, 0x1E376C08)
	ROUND18(50, g, h, a, b, c, d, e, f, 0x2748774C)
	ROUND19(51, f, g, h, a, b, c, d, e, 0x34B0BCB5)
	ROUND20(52, e, f, g, h, a, b, c, d, 0x391C0CB3)
	ROUND21(53, d, e, f, g, h, a, b, c, 0x4ED8AA4A)
	ROUND22(54, c, d, e, f, g, h, a, b, 0x5B9CCA4F)
	ROUND23(55, b, c, d, e, f, g, h, a, 0x682E6FF3)
	ROUND24(56, a, b, c, d, e, f, g, h, 0x748F82EE)
	ROUND25(57, h, a, b, c, d, e, f, g, 0x78A5636F)
	ROUND26(58, g, h, a, b, c, d, e, f, -0x7B3787EC)
	ROUND27(59, f, g, h, a, b, c, d, e, -0x7338FDF8)
	ROUND28(60, e, f, g, h, a, b, c, d, -0x6F410006)
	ROUND29(61, d, e, f, g, h, a, b, c, -0x5BAF9315)
	ROUND30(62, c, d, e, f, g, h, a, b, -0x41065C09)
	ROUND31(63, b, c, d, e, f, g, h, a, -0x398E870E)
	// Add the compressed chunk back into the state.
	stateP[0] += a;
	stateP[1] += b;
	stateP[2] += c;
	stateP[3] += d;
	stateP[4] += e;
	stateP[5] += f;
	stateP[6] += g;
	stateP[7] += h;
	// Since this implementation uses little endian byte ordering and SHA uses big endian,
	// reverse all the bytes when copying the final state to the output hash.
	for (int i = 0; i < 4; ++i) {
		l = i << 3;
		*(res) = (stateP[0] >> (24 - l)) & 0x000000ff;
		*(res + 4) = (stateP[1] >> (24 - l)) & 0x000000ff;
		*(res + 8) = (stateP[2] >> (24 - l)) & 0x000000ff;
		*(res + 12) = (stateP[3] >> (24 - l)) & 0x000000ff;
		*(res + 16) = (stateP[4] >> (24 - l)) & 0x000000ff;
		*(res + 20) = (stateP[5] >> (24 - l)) & 0x000000ff;
		*(res + 24) = (stateP[6] >> (24 - l)) & 0x000000ff;
		*(res + 28) = (stateP[7] >> (24 - l)) & 0x000000ff;
		++res;
	}
	//unsigned int* resP = (unsigned int*)res;
	//unsigned char* stateCP = (unsigned char*)stateP;
	//*(resP) = (((unsigned int)*(stateCP)<<0)| ((unsigned int)*(stateCP+1)<<8)| ((unsigned int)*(stateCP+2)<<16)| ((unsigned int)*(stateCP+3)<<24));
	//*(resP+1) = ((unsigned int)(*(stateCP+4) << 0) | ((unsigned int)*(stateCP + 5) << 8) | ((unsigned int)*(stateCP + 6) << 16) | ((unsigned int)*(stateCP + 7) << 24));
	//*(resP+2) = (((unsigned int)*(stateCP+8) << 0) | ((unsigned int)*(stateCP + 9) << 8) | ((unsigned int)*(stateCP + 10) << 16) | ((unsigned int)*(stateCP + 11) << 24));
	//*(resP+3) = (((unsigned int)*(stateCP+12) << 0) | ((unsigned int)*(stateCP + 13) << 8) | ((unsigned int)*(stateCP + 14) << 16) | ((unsigned int)*(stateCP + 15) << 24));
	//*(resP+4) = (((unsigned int)*(stateCP+16) << 0) | ((unsigned int)*(stateCP + 17) << 8) | ((unsigned int)*(stateCP + 18) << 16) | ((unsigned int)*(stateCP + 19) << 24));
	//*(resP+5) = (((unsigned int)*(stateCP+20) << 0) | ((unsigned int)*(stateCP + 21) << 8) | ((unsigned int)*(stateCP + 22) << 16) | ((unsigned int)*(stateCP + 23) << 24));
	//*(resP+6) = (((unsigned int)*(stateCP+24) << 0) | ((unsigned int)*(stateCP + 25) << 8) | ((unsigned int)*(stateCP + 26) << 16) | ((unsigned int)*(stateCP + 27) << 24));
	//*(resP+7) = (((unsigned int)*(stateCP+28) << 0) | ((unsigned int)*(stateCP + 29) << 8) | ((unsigned int)*(stateCP + 30) << 16) | ((unsigned int)*(stateCP + 31) << 24));
	///**((unsigned int*)res) = ((*((unsigned char*)stateP) << 0) | (*((unsigned char*)stateP + 1) << 8) | (*((unsigned char*)stateP + 2) << 16) | (*((unsigned char*)stateP + 3) << 24));
	//*((unsigned int*)res + 1) = ((*((unsigned char*)stateP + 4) << 0) | (*((unsigned char*)stateP + 5) << 8) | (*((unsigned char*)stateP + 6) << 16) | (*((unsigned char*)stateP + 7) << 24));
	//*((unsigned int*)res + 2) = ((*((unsigned char*)stateP + 8) << 0) | (*((unsigned char*)stateP + 9) << 8) | (*((unsigned char*)stateP + 10) << 16) | (*((unsigned char*)stateP + 11) << 24));
	//*((unsigned int*)res + 3) = ((*((unsigned char*)stateP + 12) << 0) | (*((unsigned char*)stateP + 13) << 8) | (*((unsigned char*)stateP + 14) << 16) | (*((unsigned char*)stateP + 15) << 24));
	//*((unsigned int*)res + 4) = ((*((unsigned char*)stateP + 16) << 0) | (*((unsigned char*)stateP + 17) << 8) | (*((unsigned char*)stateP + 18) << 16) | (*((unsigned char*)stateP + 19) << 24));
	//*((unsigned int*)res + 5) = ((*((unsigned char*)stateP + 20) << 0) | (*((unsigned char*)stateP + 21) << 8) | (*((unsigned char*)stateP + 22) << 16) | (*((unsigned char*)stateP + 23) << 24));
	//*((unsigned int*)res + 6) = ((*((unsigned char*)stateP + 24) << 0) | (*((unsigned char*)stateP + 25) << 8) | (*((unsigned char*)stateP + 26) << 16) | (*((unsigned char*)stateP + 27) << 24));
	//*((unsigned int*)res + 7) = ((*((unsigned char*)stateP + 28) << 0) | (*((unsigned char*)stateP + 29) << 8) | (*((unsigned char*)stateP + 30) << 16) | (*((unsigned char*)stateP + 31) << 24));*/
}
// Overload that fuses index->plaintext reduction with SHA-256: each 7-bit
// slice of `index` is mapped to (slice % charSetSize) + 32 — a printable ASCII
// code when charSetSize <= 95 — and hashed directly, avoiding a separate
// plaintext buffer. Single 64-byte block; assumes length < 56 and length >= 1.
// Uses the ROUNDa/ROUND16..ROUND31 macros from round.cuh.
__device__ inline void plainToHashWithInlinePTX(ulong index, const uint8_t length, unsigned char* res, const uint8_t charSetSize) {
	unsigned int bitlen0 = 0;
	unsigned int bitlen1 = 0;
	// Working state kept in registers (per-thread).
	unsigned int stateP[8];
	unsigned char data[64];
	unsigned int l;
	// reduct the index in the plain space
	for (l = length - 1; l >= 1; l--) {
		data[l] = (index & 0x7f) % charSetSize + 32;
		index >>= 7;
	}
	data[0] = (index & 0x7f) % charSetSize + 32;
	l = length;
	// SHA-256 initial hash values H0..H7.
	stateP[0] = 0x6a09e667;
	stateP[1] = 0xbb67ae85;
	stateP[2] = 0x3c6ef372;
	stateP[3] = 0xa54ff53a;
	stateP[4] = 0x510e527f;
	stateP[5] = 0x9b05688c;
	stateP[6] = 0x1f83d9ab;
	stateP[7] = 0x5be0cd19;
	// Standard SHA-256 padding: 0x80 terminator, zero fill up to byte 56.
	data[l++] = 0x80;
	while (l < 56)
		data[l++] = 0x00;
	//// Append to the padding the total message's length in bits and transform.
	DBL_INT_ADD(bitlen0, bitlen1, length * 8);
	data[63] = bitlen0;
	data[62] = bitlen0 >> 8;
	data[61] = bitlen0 >> 16;
	data[60] = bitlen0 >> 24;
	data[59] = bitlen1;
	data[58] = bitlen1 >> 8;
	data[57] = bitlen1 >> 16;
	data[56] = bitlen1 >> 24;
	// 16-word rolling schedule; extended in place by the ROUND16..31 macros.
	uint32_t schedule[16];
	uint32_t a = stateP[0];
	uint32_t b = stateP[1];
	uint32_t c = stateP[2];
	uint32_t d = stateP[3];
	uint32_t e = stateP[4];
	uint32_t f = stateP[5];
	uint32_t g = stateP[6];
	uint32_t h = stateP[7];
	// 64 compression rounds; negative literals are SHA-256 K constants as
	// signed 32-bit values.
	ROUNDa(0, a, b, c, d, e, f, g, h, 0x428A2F98)
	ROUNDa(1, h, a, b, c, d, e, f, g, 0x71374491)
	ROUNDa(2, g, h, a, b, c, d, e, f, -0x4A3F0431)
	ROUNDa(3, f, g, h, a, b, c, d, e, -0x164A245B)
	ROUNDa(4, e, f, g, h, a, b, c, d, 0x3956C25B)
	ROUNDa(5, d, e, f, g, h, a, b, c, 0x59F111F1)
	ROUNDa(6, c, d, e, f, g, h, a, b, -0x6DC07D5C)
	ROUNDa(7, b, c, d, e, f, g, h, a, -0x54E3A12B)
	ROUNDa(8, a, b, c, d, e, f, g, h, -0x27F85568)
	ROUNDa(9, h, a, b, c, d, e, f, g, 0x12835B01)
	ROUNDa(10, g, h, a, b, c, d, e, f, 0x243185BE)
	ROUNDa(11, f, g, h, a, b, c, d, e, 0x550C7DC3)
	ROUNDa(12, e, f, g, h, a, b, c, d, 0x72BE5D74)
	ROUNDa(13, d, e, f, g, h, a, b, c, -0x7F214E02)
	ROUNDa(14, c, d, e, f, g, h, a, b, -0x6423F959)
	ROUNDa(15, b, c, d, e, f, g, h, a, -0x3E640E8C)
	ROUND16(16, a, b, c, d, e, f, g, h, -0x1B64963F)
	ROUND17(17, h, a, b, c, d, e, f, g, -0x1041B87A)
	ROUND18(18, g, h, a, b, c, d, e, f, 0x0FC19DC6)
	ROUND19(19, f, g, h, a, b, c, d, e, 0x240CA1CC)
	ROUND20(20, e, f, g, h, a, b, c, d, 0x2DE92C6F)
	ROUND21(21, d, e, f, g, h, a, b, c, 0x4A7484AA)
	ROUND22(22, c, d, e, f, g, h, a, b, 0x5CB0A9DC)
	ROUND23(23, b, c, d, e, f, g, h, a, 0x76F988DA)
	ROUND24(24, a, b, c, d, e, f, g, h, -0x67C1AEAE)
	ROUND25(25, h, a, b, c, d, e, f, g, -0x57CE3993)
	ROUND26(26, g, h, a, b, c, d, e, f, -0x4FFCD838)
	ROUND27(27, f, g, h, a, b, c, d, e, -0x40A68039)
	ROUND28(28, e, f, g, h, a, b, c, d, -0x391FF40D)
	ROUND29(29, d, e, f, g, h, a, b, c, -0x2A586EB9)
	ROUND30(30, c, d, e, f, g, h, a, b, 0x06CA6351)
	ROUND31(31, b, c, d, e, f, g, h, a, 0x14292967)
	ROUND16(32, a, b, c, d, e, f, g, h, 0x27B70A85)
	ROUND17(33, h, a, b, c, d, e, f, g, 0x2E1B2138)
	ROUND18(34, g, h, a, b, c, d, e, f, 0x4D2C6DFC)
	ROUND19(35, f, g, h, a, b, c, d, e, 0x53380D13)
	ROUND20(36, e, f, g, h, a, b, c, d, 0x650A7354)
	ROUND21(37, d, e, f, g, h, a, b, c, 0x766A0ABB)
	ROUND22(38, c, d, e, f, g, h, a, b, -0x7E3D36D2)
	ROUND23(39, b, c, d, e, f, g, h, a, -0x6D8DD37B)
	ROUND24(40, a, b, c, d, e, f, g, h, -0x5D40175F)
	ROUND25(41, h, a, b, c, d, e, f, g, -0x57E599B5)
	ROUND26(42, g, h, a, b, c, d, e, f, -0x3DB47490)
	ROUND27(43, f, g, h, a, b, c, d, e, -0x3893AE5D)
	ROUND28(44, e, f, g, h, a, b, c, d, -0x2E6D17E7)
	ROUND29(45, d, e, f, g, h, a, b, c, -0x2966F9DC)
	ROUND30(46, c, d, e, f, g, h, a, b, -0x0BF1CA7B)
	ROUND31(47, b, c, d, e, f, g, h, a, 0x106AA070)
	ROUND16(48, a, b, c, d, e, f, g, h, 0x19A4C116)
	ROUND17(49, h, a, b, c, d, e, f, g, 0x1E376C08)
	ROUND18(50, g, h, a, b, c, d, e, f, 0x2748774C)
	ROUND19(51, f, g, h, a, b, c, d, e, 0x34B0BCB5)
	ROUND20(52, e, f, g, h, a, b, c, d, 0x391C0CB3)
	ROUND21(53, d, e, f, g, h, a, b, c, 0x4ED8AA4A)
	ROUND22(54, c, d, e, f, g, h, a, b, 0x5B9CCA4F)
	ROUND23(55, b, c, d, e, f, g, h, a, 0x682E6FF3)
	ROUND24(56, a, b, c, d, e, f, g, h, 0x748F82EE)
	ROUND25(57, h, a, b, c, d, e, f, g, 0x78A5636F)
	ROUND26(58, g, h, a, b, c, d, e, f, -0x7B3787EC)
	ROUND27(59, f, g, h, a, b, c, d, e, -0x7338FDF8)
	ROUND28(60, e, f, g, h, a, b, c, d, -0x6F410006)
	ROUND29(61, d, e, f, g, h, a, b, c, -0x5BAF9315)
	ROUND30(62, c, d, e, f, g, h, a, b, -0x41065C09)
	ROUND31(63, b, c, d, e, f, g, h, a, -0x398E870E)
	// Add the compressed chunk back into the state.
	stateP[0] += a;
	stateP[1] += b;
	stateP[2] += c;
	stateP[3] += d;
	stateP[4] += e;
	stateP[5] += f;
	stateP[6] += g;
	stateP[7] += h;
	// Since this implementation uses little endian byte ordering and SHA uses big endian,
	// reverse all the bytes when copying the final state to the output hash.
	for (int i = 0; i < 4; ++i) {
		l = i << 3;
		*(res) = (stateP[0] >> (24 - l)) & 0x000000ff;
		*(res + 4) = (stateP[1] >> (24 - l)) & 0x000000ff;
		*(res + 8) = (stateP[2] >> (24 - l)) & 0x000000ff;
		*(res + 12) = (stateP[3] >> (24 - l)) & 0x000000ff;
		*(res + 16) = (stateP[4] >> (24 - l)) & 0x000000ff;
		*(res + 20) = (stateP[5] >> (24 - l)) & 0x000000ff;
		*(res + 24) = (stateP[6] >> (24 - l)) & 0x000000ff;
		*(res + 28) = (stateP[7] >> (24 - l)) & 0x000000ff;
		++res;
	}
}
// SHA-256 of a short plaintext (single 64-byte block; assumes length < 56).
// Unlike the InlinePTX variants above, this version materializes the full
// 64-word message schedule via LOADSCHEDULE/SCHEDULE and uses the generic
// ROUND macro, and it keeps the working state in this thread's row of the
// file-scope __shared__ `state` array rather than in registers.
// Writes the 32-byte big-endian digest to `res`.
__device__ inline void plainToHash(char* plain, const uint8_t length, unsigned char* res)
{
	unsigned int bitlen0 = 0;
	unsigned int bitlen1 = 0;
	//unsigned int stateP[8];
	unsigned char data[64];
	unsigned int l;
	// Copy the message into the 64-byte block buffer.
	for (l = 0; l < length; ++l) {
		data[l] = plain[l];
	}
	// Per-thread working state lives in shared memory (state[384][8]).
	uint* stateP = state[threadIdx.x];
	// SHA-256 initial hash values H0..H7.
	stateP[0] = 0x6a09e667;
	stateP[1] = 0xbb67ae85;
	stateP[2] = 0x3c6ef372;
	stateP[3] = 0xa54ff53a;
	stateP[4] = 0x510e527f;
	stateP[5] = 0x9b05688c;
	stateP[6] = 0x1f83d9ab;
	stateP[7] = 0x5be0cd19;
	// Pad whatever data is left in the buffer.
	data[l++] = 0x80;
	while (l < 56)
		data[l++] = 0x00;
	// Append to the padding the total message's length in bits and transform.
	DBL_INT_ADD(bitlen0, bitlen1, length * 8);
	data[63] = bitlen0;
	data[62] = bitlen0 >> 8;
	data[61] = bitlen0 >> 16;
	data[60] = bitlen0 >> 24;
	data[59] = bitlen1;
	data[58] = bitlen1 >> 8;
	data[57] = bitlen1 >> 16;
	data[56] = bitlen1 >> 24;
	// Build the full 64-word message schedule up front: W0..W15 from the data
	// block (big-endian), W16..W63 from the sigma recurrences (see SCHEDULE).
	uint32_t schedule[64];
	LOADSCHEDULE(0)
	LOADSCHEDULE(1)
	LOADSCHEDULE(2)
	LOADSCHEDULE(3)
	LOADSCHEDULE(4)
	LOADSCHEDULE(5)
	LOADSCHEDULE(6)
	LOADSCHEDULE(7)
	LOADSCHEDULE(8)
	LOADSCHEDULE(9)
	LOADSCHEDULE(10)
	LOADSCHEDULE(11)
	LOADSCHEDULE(12)
	LOADSCHEDULE(13)
	LOADSCHEDULE(14)
	LOADSCHEDULE(15)
	SCHEDULE(16)
	SCHEDULE(17)
	SCHEDULE(18)
	SCHEDULE(19)
	SCHEDULE(20)
	SCHEDULE(21)
	SCHEDULE(22)
	SCHEDULE(23)
	SCHEDULE(24)
	SCHEDULE(25)
	SCHEDULE(26)
	SCHEDULE(27)
	SCHEDULE(28)
	SCHEDULE(29)
	SCHEDULE(30)
	SCHEDULE(31)
	SCHEDULE(32)
	SCHEDULE(33)
	SCHEDULE(34)
	SCHEDULE(35)
	SCHEDULE(36)
	SCHEDULE(37)
	SCHEDULE(38)
	SCHEDULE(39)
	SCHEDULE(40)
	SCHEDULE(41)
	SCHEDULE(42)
	SCHEDULE(43)
	SCHEDULE(44)
	SCHEDULE(45)
	SCHEDULE(46)
	SCHEDULE(47)
	SCHEDULE(48)
	SCHEDULE(49)
	SCHEDULE(50)
	SCHEDULE(51)
	SCHEDULE(52)
	SCHEDULE(53)
	SCHEDULE(54)
	SCHEDULE(55)
	SCHEDULE(56)
	SCHEDULE(57)
	SCHEDULE(58)
	SCHEDULE(59)
	SCHEDULE(60)
	SCHEDULE(61)
	SCHEDULE(62)
	SCHEDULE(63)
	uint32_t a = stateP[0];
	uint32_t b = stateP[1];
	uint32_t c = stateP[2];
	uint32_t d = stateP[3];
	uint32_t e = stateP[4];
	uint32_t f = stateP[5];
	uint32_t g = stateP[6];
	uint32_t h = stateP[7];
	// 64 compression rounds with the standard SHA-256 K constants; the
	// register arguments rotate by one position each round (see ROUND macro).
	ROUND(a, b, c, d, e, f, g, h, 0, 0x428A2F98)
	ROUND(h, a, b, c, d, e, f, g, 1, 0x71374491)
	ROUND(g, h, a, b, c, d, e, f, 2, 0xB5C0FBCF)
	ROUND(f, g, h, a, b, c, d, e, 3, 0xE9B5DBA5)
	ROUND(e, f, g, h, a, b, c, d, 4, 0x3956C25B)
	ROUND(d, e, f, g, h, a, b, c, 5, 0x59F111F1)
	ROUND(c, d, e, f, g, h, a, b, 6, 0x923F82A4)
	ROUND(b, c, d, e, f, g, h, a, 7, 0xAB1C5ED5)
	ROUND(a, b, c, d, e, f, g, h, 8, 0xD807AA98)
	ROUND(h, a, b, c, d, e, f, g, 9, 0x12835B01)
	ROUND(g, h, a, b, c, d, e, f, 10, 0x243185BE)
	ROUND(f, g, h, a, b, c, d, e, 11, 0x550C7DC3)
	ROUND(e, f, g, h, a, b, c, d, 12, 0x72BE5D74)
	ROUND(d, e, f, g, h, a, b, c, 13, 0x80DEB1FE)
	ROUND(c, d, e, f, g, h, a, b, 14, 0x9BDC06A7)
	ROUND(b, c, d, e, f, g, h, a, 15, 0xC19BF174)
	ROUND(a, b, c, d, e, f, g, h, 16, 0xE49B69C1)
	ROUND(h, a, b, c, d, e, f, g, 17, 0xEFBE4786)
	ROUND(g, h, a, b, c, d, e, f, 18, 0x0FC19DC6)
	ROUND(f, g, h, a, b, c, d, e, 19, 0x240CA1CC)
	ROUND(e, f, g, h, a, b, c, d, 20, 0x2DE92C6F)
	ROUND(d, e, f, g, h, a, b, c, 21, 0x4A7484AA)
	ROUND(c, d, e, f, g, h, a, b, 22, 0x5CB0A9DC)
	ROUND(b, c, d, e, f, g, h, a, 23, 0x76F988DA)
	ROUND(a, b, c, d, e, f, g, h, 24, 0x983E5152)
	ROUND(h, a, b, c, d, e, f, g, 25, 0xA831C66D)
	ROUND(g, h, a, b, c, d, e, f, 26, 0xB00327C8)
	ROUND(f, g, h, a, b, c, d, e, 27, 0xBF597FC7)
	ROUND(e, f, g, h, a, b, c, d, 28, 0xC6E00BF3)
	ROUND(d, e, f, g, h, a, b, c, 29, 0xD5A79147)
	ROUND(c, d, e, f, g, h, a, b, 30, 0x06CA6351)
	ROUND(b, c, d, e, f, g, h, a, 31, 0x14292967)
	ROUND(a, b, c, d, e, f, g, h, 32, 0x27B70A85)
	ROUND(h, a, b, c, d, e, f, g, 33, 0x2E1B2138)
	ROUND(g, h, a, b, c, d, e, f, 34, 0x4D2C6DFC)
	ROUND(f, g, h, a, b, c, d, e, 35, 0x53380D13)
	ROUND(e, f, g, h, a, b, c, d, 36, 0x650A7354)
	ROUND(d, e, f, g, h, a, b, c, 37, 0x766A0ABB)
	ROUND(c, d, e, f, g, h, a, b, 38, 0x81C2C92E)
	ROUND(b, c, d, e, f, g, h, a, 39, 0x92722C85)
	ROUND(a, b, c, d, e, f, g, h, 40, 0xA2BFE8A1)
	ROUND(h, a, b, c, d, e, f, g, 41, 0xA81A664B)
	ROUND(g, h, a, b, c, d, e, f, 42, 0xC24B8B70)
	ROUND(f, g, h, a, b, c, d, e, 43, 0xC76C51A3)
	ROUND(e, f, g, h, a, b, c, d, 44, 0xD192E819)
	ROUND(d, e, f, g, h, a, b, c, 45, 0xD6990624)
	ROUND(c, d, e, f, g, h, a, b, 46, 0xF40E3585)
	ROUND(b, c, d, e, f, g, h, a, 47, 0x106AA070)
	ROUND(a, b, c, d, e, f, g, h, 48, 0x19A4C116)
	ROUND(h, a, b, c, d, e, f, g, 49, 0x1E376C08)
	ROUND(g, h, a, b, c, d, e, f, 50, 0x2748774C)
	ROUND(f, g, h, a, b, c, d, e, 51, 0x34B0BCB5)
	ROUND(e, f, g, h, a, b, c, d, 52, 0x391C0CB3)
	ROUND(d, e, f, g, h, a, b, c, 53, 0x4ED8AA4A)
	ROUND(c, d, e, f, g, h, a, b, 54, 0x5B9CCA4F)
	ROUND(b, c, d, e, f, g, h, a, 55, 0x682E6FF3)
	ROUND(a, b, c, d, e, f, g, h, 56, 0x748F82EE)
	ROUND(h, a, b, c, d, e, f, g, 57, 0x78A5636F)
	ROUND(g, h, a, b, c, d, e, f, 58, 0x84C87814)
	ROUND(f, g, h, a, b, c, d, e, 59, 0x8CC70208)
	ROUND(e, f, g, h, a, b, c, d, 60, 0x90BEFFFA)
	ROUND(d, e, f, g, h, a, b, c, 61, 0xA4506CEB)
	ROUND(c, d, e, f, g, h, a, b, 62, 0xBEF9A3F7)
	ROUND(b, c, d, e, f, g, h, a, 63, 0xC67178F2)
	// Add the compressed chunk back into the state.
	stateP[0] += a;
	stateP[1] += b;
	stateP[2] += c;
	stateP[3] += d;
	stateP[4] += e;
	stateP[5] += f;
	stateP[6] += g;
	stateP[7] += h;
	// Since this implementation uses little endian byte ordering and SHA uses big endian,
	// reverse all the bytes when copying the final state to the output hash.
	for (int i = 0; i < 4; ++i) {
		l = i << 3;
		*(res) = (stateP[0] >> (24 - l)) & 0x000000ff;
		*(res + 4) = (stateP[1] >> (24 - l)) & 0x000000ff;
		*(res + 8) = (stateP[2] >> (24 - l)) & 0x000000ff;
		*(res + 12) = (stateP[3] >> (24 - l)) & 0x000000ff;
		*(res + 16) = (stateP[4] >> (24 - l)) & 0x000000ff;
		*(res + 20) = (stateP[5] >> (24 - l)) & 0x000000ff;
		*(res + 24) = (stateP[6] >> (24 - l)) & 0x000000ff;
		*(res + 28) = (stateP[7] >> (24 - l)) & 0x000000ff;
		++res;
	}
}
// Copies the plaintext character set from constant memory into this
// thread's per-thread character-set buffer.
// @param charSetSize number of characters to copy
__device__ inline void initSHA256ConstantAndCharSet(const unsigned int charSetSize)
{
	char* dst = plainCharSet[threadIdx.x];
	unsigned int i = 0;
	while (i < charSetSize) {
		dst[i] = constantAreaPlainCharSet[i];
		++i;
	}
}
// Reduction function: maps a hash to an index in [0, plainSpace) by adding
// the round position to the first 64 bits of the hash and taking the modulus.
// NOTE(review): reads the hash through a ulong* -- assumes the 32-byte hash
// buffer is suitably aligned for 8-byte loads; confirm at call sites.
__device__ inline ulong hashToIndex(unsigned char* hash, int pos, ulong plainSpace)
{
	ulong word = *((ulong*)hash);
	return (word + (ulong)pos) % plainSpace;
}
// Reduction function over the full 256-bit hash: XOR-folds the four 64-bit
// words together and adds the round position (no modulus applied here).
__device__ ulong hashToIndex(unsigned char* hash, int pos)
{
	ulong* w = (ulong*)hash;
	ulong folded = w[0];
	folded ^= w[1];
	folded ^= w[2];
	folded ^= w[3];
	return folded + (ulong)pos;
}
// Re-encodes a chain's final 64-bit index as printable characters packed
// 7 bits per position.  Each of the plainLength positions consumes 7 bits
// of `index`, is mapped into [32, 32 + plainCharSize) by a modulus, and
// the resulting bytes are re-packed most-significant position first,
// 7 bits apart, into the returned value.
//
// NOTE(review): plainIndex holds at most 9 entries, so plainLength must be
// in 1..9; with plainLength == 0 the final `res += plainIndex[j]` reads an
// uninitialized slot -- confirm callers never pass 0.
// NOTE(review): the packed values only fit in 7 bits when
// plainCharSize <= 96; presumably the 95-char printable-ASCII set is used.
__device__ inline ulong reductFinalIndex(ulong index, uint8_t plainLength, uint8_t plainCharSize)
{
ulong res = 0;
uint8_t plainIndex[9];
// Decode: walk positions from last to first, consuming 7 bits each.
for (int l = plainLength - 1; l >= 0; l--) {
// 32 - 126
plainIndex[l] = ((uint8_t)(index & 0x7f)) % plainCharSize + 32;
index >>= 7;
}
int j;
// Re-pack: each of the first plainLength-1 positions is followed by a
// 7-bit shift; the last position is added without shifting.
for (j = 0; j < plainLength - 1; j++) {
res += plainIndex[j];
res <<= 7;
}
res += plainIndex[j];
return res;
}
////////////////////////////////////////////////////////////
// Kernel. Builds one rainbow chain per thread ("paper" variant):
// starting from the chain's start index, alternately hash the index
// (plainToHashWithInlinePTX) and reduce the hash back to an index,
// chainLength times.  The end index is stored after a final reduction
// into the plaintext index space.
//
// Cleanup: removed the unused local `plain[8]` and the dead
// commented-out alternate implementation.
//
// NOTE(review): no bounds guard on offset -- the grid must be sized to
// exactly the number of chains.
//
// @param chains            one Chain per thread; indexS read, indexE written
// @param plainCharSetSize  size of the plaintext character set
// @param plainLength       plaintext length in characters
// @param chainLength       number of hash/reduce steps per chain
////////////////////////////////////////////////////////////
__global__ void generateChainPaperVersion(struct Chain* chains, const uint8_t plainCharSetSize,
	const uint8_t plainLength, const unsigned int chainLength)
{
	unsigned char hash[32];
	uint offset = (blockIdx.x * blockDim.x) + threadIdx.x;
	struct Chain* chain = chains + offset;
	ulong indexE = chain->indexS;
	for (int i = 0; i < chainLength; i++) {
		plainToHashWithInlinePTX(indexE, plainLength, hash, plainCharSetSize);
		indexE = hashToIndex(hash, i);
	}
	chain->indexE = reductFinalIndex(indexE, plainLength, plainCharSetSize);
}
////////////////////////////////////////////////////////////
// Kernel. Builds one rainbow chain per thread (plaintext-space
// variant): each step decodes the index to a plaintext, hashes it, and
// reduces the hash modulo plainSpace back into an index.
//
// Cleanup: removed the dead commented-out alternate implementation and
// the stray closing-brace comments that made the body hard to read.
//
// NOTE(review): no bounds guard on offset -- the grid must be sized to
// exactly the number of chains.
//
// @param chains            one Chain per thread; indexS read, indexE written
// @param plainCharSetSize  size of the plaintext character set
// @param plainLength       plaintext length in characters (<= 8, size of plain[])
// @param chainLength       number of hash/reduce steps per chain
// @param plainSpace        size of the plaintext index space
//                          (presumably plainCharSetSize^plainLength -- see generateTable)
////////////////////////////////////////////////////////////
__global__ void generateChainPaperVersion(struct Chain* chains, const uint8_t plainCharSetSize,
	const uint8_t plainLength, const unsigned int chainLength, ulong plainSpace)
{
	unsigned char hash[32];
	char plain[8];
	uint offset = (blockIdx.x * blockDim.x) + threadIdx.x;
	struct Chain* chain = chains + offset;
	ulong indexE = chain->indexS;
	for (int i = 0; i < chainLength; i++) {
		indexToPlain(indexE, plainLength, plainCharSetSize, plain);
		plainToHashWithInlinePTX(plain, plainLength, hash);
		indexE = hashToIndex(hash, i, plainSpace);
	}
	chain->indexE = indexE;
}
////////////////////////////////////////////////////////////
// Kernel. Hashes every candidate password of length 1..3.  The mapping
// array is laid out as all length-1 plains, then all length-2, then all
// length-3, so a thread's offset determines its plaintext length.
//
// @param chains           one PasswordMapping per candidate plaintext
// @param plainCharSetSize size of the plaintext character set
////////////////////////////////////////////////////////////
__global__ void generateChain(struct PasswordMapping* chains, const uint8_t plainCharSetSize)
{
	uint32_t offset = (blockIdx.x * blockDim.x) + threadIdx.x;
	const uint32_t len1End = plainCharSetSize;
	const uint32_t len2End = len1End + plainCharSetSize * plainCharSetSize;
	const uint32_t len3End = len2End + plainCharSetSize * plainCharSetSize * plainCharSetSize;
	if (offset >= len3End) {
		return; // excess threads from the rounded-up grid
	}
	struct PasswordMapping* chain = chains + offset;
	uint8_t length = (offset < len1End) ? 1 : ((offset < len2End) ? 2 : 3);
	plainToHash(chain->plain, length, chain->hash);
}
////////////////////////////////////////////////////////////
// Builds the exhaustive password->hash table for all plaintext lengths
// 1..3 over the given character set, sorts it on the device and writes
// it to disk.
//
// Fixes: CUDA events are now destroyed (previously the destroy calls
// were commented out), the kernel launch is checked with
// cudaGetLastError, and the event API calls are error-checked like the
// other CUDA calls in this file.
//
// @param hostCharSetPath  path of the character-set file
// @param plainCharSetSize number of characters in the set
////////////////////////////////////////////////////////////
void generateTableWhilePasswordLengthLowerOrEqualThan3(const char* hostCharSetPath , const uint8_t plainCharSetSize)
{
	// |charset| + |charset|^2 + |charset|^3 candidate plaintexts in total.
	const uint32_t CHAINS_SIZE = plainCharSetSize + plainCharSetSize * plainCharSetSize + plainCharSetSize * plainCharSetSize * plainCharSetSize;
	struct PasswordMapping* deviceChains;
	struct PasswordMapping* hostChains;
	char* hostCharSet;
	CUDA_CALL(cudaHostAlloc(&hostChains, CHAINS_SIZE * sizeof(struct PasswordMapping), cudaHostAllocDefault));
	CUDA_CALL(cudaHostAlloc(&hostCharSet, plainCharSetSize * sizeof(char), cudaHostAllocDefault));
	getCharSet(hostCharSet, hostCharSetPath, plainCharSetSize);
	generateInitialIndex(hostChains, hostCharSet, plainCharSetSize);
	CUDA_CALL(cudaMalloc(&deviceChains, CHAINS_SIZE * sizeof(struct PasswordMapping)));
	CUDA_CALL(cudaMemcpy(deviceChains, hostChains, CHAINS_SIZE * sizeof(struct PasswordMapping), cudaMemcpyHostToDevice));
	uint32_t threadPerBlock = 384;
	// ceil-div; the kernel bounds-checks the tail threads
	uint32_t blockNum = (CHAINS_SIZE + threadPerBlock - 1) / threadPerBlock;
	cudaEvent_t startEvent;
	cudaEvent_t endEvent;
	float cudaElapsedTime = 0.0f;
	CUDA_CALL(cudaEventCreate(&startEvent));
	CUDA_CALL(cudaEventCreate(&endEvent));
	CUDA_CALL(cudaEventRecord(startEvent, 0));
	generateChain<<<blockNum, threadPerBlock>>>(deviceChains, plainCharSetSize);
	CUDA_CALL(cudaGetLastError()); // catch bad launch configuration
	CUDA_CALL(cudaEventRecord(endEvent, 0));
	CUDA_CALL(cudaEventSynchronize(endEvent));
	CUDA_CALL(cudaEventElapsedTime(&cudaElapsedTime, startEvent, endEvent));
	QSort(deviceChains, CHAINS_SIZE);
	CUDA_CALL(cudaMemcpy(hostChains, deviceChains, CHAINS_SIZE * sizeof(struct PasswordMapping), cudaMemcpyDeviceToHost));
	// file name format: plainLengthRange#charSet#tableNumber
	writeToFile((string("../") + "1-3#" + "ascii-32-95#" + "1").c_str(), hostChains, sizeof(struct PasswordMapping), CHAINS_SIZE);
	cudaFreeHost(hostChains);
	cudaFreeHost(hostCharSet);
	cudaFree(deviceChains);
	cudaEventDestroy(startEvent); // previously leaked (left commented out)
	cudaEventDestroy(endEvent);
	cudaDeviceReset();
	// elapsed time is in ms: hashes / (ms * 1000) == millions of hashes/second
	printf("%.3lf MH/S", (CHAINS_SIZE) / (cudaElapsedTime * 1000.0));
}
//int main()
//{
// generateTableWhilePasswordLengthLowerOrEqualThan3("../charsets/ascii-32-95.txt", 95);
//
// //constexpr uint32_t CHAINS_SIZE = 95 + 95 * 95 + 95 * 95 * 95;
// //struct PasswordMapping* mappings;
// //cudaHostAlloc(&mappings, sizeof(struct PasswordMapping) * CHAINS_SIZE, cudaHostAllocDefault);
// //openTableFile((string("../") + "1-3#" + "ascii-32-95#" + "1").c_str(), mappings, sizeof(struct PasswordMapping), CHAINS_SIZE);
// //for (int i = 0;i < CHAINS_SIZE;i++) {
// // printf("%s\n", mappings[i].hash);
// //}
// //getchar();
//
// return 0;
//}
////////////////////////////////////////////////////////////
// Builds a rainbow table for plaintexts of a fixed length (4..6),
// sorts the chains by end index, strips duplicates and writes the
// surviving chains to disk.
//
// coverage ~= CHAINS_SIZE * chainLength, CHAINS_SIZE == blockNum * threadPerBlock.
//
// Fixes: an unsupported plainLength now fails loudly instead of
// silently launching a 0-block grid; the `forWrite` host allocation and
// the CUDA events are now released; the kernel launch is checked; the
// unused `plainSpace` local (the 4-arg kernel does not take it) was removed.
//
// @param plainLength      plaintext length (only 4, 5 and 6 supported)
// @param hostCharSetPath  path of the character-set file
// @param plainCharSetSize number of characters in the set
////////////////////////////////////////////////////////////
void generateTable(const uint8_t plainLength, const char* hostCharSetPath, const uint8_t plainCharSetSize)
{
	const uint32_t threadPerBlock = 384;
	uint32_t blockNum = 0;
	uint32_t CHAINS_SIZE = 0;
	uint32_t chainLength = 0;
	// Chain-count / chain-length strategy per plaintext length (tuned for
	// plainCharSetSize == 95).  The collision ratio is quite high when
	// plainLength is low, so shorter chains are used there.
	switch (plainLength) {
	case 4:
		chainLength = 350;
		CHAINS_SIZE = 384000;
		blockNum = 1000;
		break;
	case 5:
		chainLength = 3600;
		CHAINS_SIZE = 2304000;
		blockNum = 6000;
		break;
	case 6:
		chainLength = 60000;
		CHAINS_SIZE = 15360000;
		blockNum = 40000;
		break;
	default:
		// Previously fell through with all sizes left at 0.
		fprintf(stderr, "generateTable: unsupported plainLength %u (expected 4..6)\n", (unsigned)plainLength);
		return;
	}
	struct Chain* devicePointer;
	struct Chain* hostPointer;
	char* hostCharSet;
	char* deviceCharSet;
	CUDA_CALL(cudaHostAlloc(&hostPointer, CHAINS_SIZE * sizeof(struct Chain), cudaHostAllocDefault));
	CUDA_CALL(cudaHostAlloc(&hostCharSet, plainCharSetSize * sizeof(char), cudaHostAllocDefault));
	getCharSet(hostCharSet, hostCharSetPath, plainCharSetSize);
	generateInitialIndex(hostPointer, CHAINS_SIZE);
	CUDA_CALL(cudaMalloc(&devicePointer, CHAINS_SIZE * sizeof(struct Chain)));
	// NOTE(review): deviceCharSet is allocated and filled but never passed
	// to the kernel (the constant-memory copy below is commented out);
	// kept to preserve behavior -- candidate for removal.
	CUDA_CALL(cudaMalloc(&deviceCharSet, plainCharSetSize * sizeof(char)));
	CUDA_CALL(cudaMemcpy(deviceCharSet, hostCharSet, plainCharSetSize * sizeof(char), cudaMemcpyHostToDevice));
	CUDA_CALL(cudaMemcpy(devicePointer, hostPointer, CHAINS_SIZE * sizeof(struct Chain), cudaMemcpyHostToDevice));
	//CUDA_CALL(cudaMemcpyToSymbol(constantAreaPlainCharSet, hostCharSet, sizeof(char) * plainCharSetSize));
	cudaEvent_t startEvent;
	cudaEvent_t endEvent;
	float cudaElapsedTime = 0.0f;
	CUDA_CALL(cudaEventCreate(&startEvent));
	CUDA_CALL(cudaEventCreate(&endEvent));
	CUDA_CALL(cudaEventRecord(startEvent, 0));
	generateChainPaperVersion<<<blockNum, threadPerBlock>>>(devicePointer, plainCharSetSize, plainLength, chainLength);
	CUDA_CALL(cudaGetLastError()); // catch bad launch configuration
	CUDA_CALL(cudaEventRecord(endEvent, 0));
	CUDA_CALL(cudaEventSynchronize(endEvent));
	CUDA_CALL(cudaEventElapsedTime(&cudaElapsedTime, startEvent, endEvent));
	// Sort chains by end index so duplicate chains become adjacent.
	thrust::device_ptr<struct Chain> thrustChainP(devicePointer);
	thrust::sort(thrustChainP, thrustChainP + CHAINS_SIZE, ChainComparator());
	CUDA_CALL(cudaMemcpy(hostPointer, devicePointer, CHAINS_SIZE * sizeof(struct Chain), cudaMemcpyDeviceToHost));
	struct Chain* forWrite;
	CUDA_CALL(cudaHostAlloc(&forWrite, sizeof(struct Chain) * CHAINS_SIZE, cudaHostAllocDefault));
	uint32_t actualSize = removeDuplicate(forWrite, hostPointer, CHAINS_SIZE);
	writeToFile(fileNameBuilder("../", plainLength, hostCharSetPath, 1, actualSize, chainLength).c_str(), forWrite, sizeof(struct Chain), actualSize);
	cudaFreeHost(hostPointer);
	cudaFreeHost(hostCharSet);
	cudaFreeHost(forWrite); // previously leaked
	cudaFree(deviceCharSet);
	cudaFree(devicePointer);
	cudaEventDestroy(startEvent); // previously leaked (left commented out)
	cudaEventDestroy(endEvent);
	cudaDeviceReset();
	// elapsed time is in ms: hashes / (ms * 1000) == millions of hashes/second
	printf("%.3lf MH/S", (CHAINS_SIZE * (ulong)chainLength) / (cudaElapsedTime * 1000.0));
}
// Entry point: builds the length-4 rainbow table over the 95-character
// printable-ASCII set (codes 32-126).
int main()
{
generateTable(4, "../charsets/ascii-32-95.txt", 95);
return 0;
}
//int main(int argc, char *argv[])
//{
// const uint CHAINS_SIZE = 7680000;
// int plainLength = 4;
// int chainLength = 100000;
//
// int plainCharSetSize = 95;
//
// //cudaSetDeviceFlags(cudaDeviceMapHost);
// struct Chain* devicePointer;
// struct Chain* hostPointer;
// char* hostCharSet;
// char* deviceCharSet;
// //CUDA_CALL(cudaHostAlloc(&hostPointer, CHAINS_SIZE * sizeof(struct Chain), cudaHostAllocDefault | cudaHostAllocMapped));
// //CUDA_CALL(cudaHostAlloc(&hostCharSet, 36 * sizeof(char), cudaHostAllocDefault | cudaHostAllocMapped));
// CUDA_CALL(cudaHostAlloc(&hostPointer, CHAINS_SIZE * sizeof(struct Chain), cudaHostAllocDefault));
// CUDA_CALL(cudaHostAlloc(&hostCharSet, plainCharSetSize * sizeof(char), cudaHostAllocDefault));
//
// getCharSet(hostCharSet, "../charsets/ascii-32-95.txt", plainCharSetSize);
//
// generateInitialIndex(hostPointer, CHAINS_SIZE);
//
// //printf("%llu", hostPointer[0].indexS);
//
// CUDA_CALL(cudaMalloc(&devicePointer, CHAINS_SIZE * sizeof(struct Chain)));
// CUDA_CALL(cudaMalloc(&deviceCharSet, plainCharSetSize * sizeof(char)));
//
// CUDA_CALL(cudaMemcpy(deviceCharSet, hostCharSet, plainCharSetSize * sizeof(char), cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(devicePointer, hostPointer, CHAINS_SIZE * sizeof(struct Chain), cudaMemcpyHostToDevice));
//
// CUDA_CALL(cudaMemcpyToSymbol(constantAreaPlainCharSet, hostCharSet, sizeof(char) * plainCharSetSize));
//
// /*curandGenerator_t randGeneratorDevice;
// const ulong seed = 987654321;
// const curandRngType_t generatorType = CURAND_RNG_PSEUDO_DEFAULT;
//
// curandCreateGenerator(&randGeneratorDevice, generatorType);
// curandSetPseudoRandomGeneratorSeed(randGeneratorDevice, seed);
// curandGenerateLongLong(randGeneratorDevice, (ulong *)devicePointer, CHAINS_SIZE * 2);*/
//
// int threadPerBlock = 384;
// uint blockNum = CHAINS_SIZE / threadPerBlock;
//
// cudaEvent_t startEvent;
// cudaEvent_t endEvent;
// float cudaElapsedTime = 0.0f;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&endEvent);
// cudaEventRecord(startEvent, 0);
//
// generateChainPaperVersion << <blockNum, threadPerBlock >> > (devicePointer, plainCharSetSize, plainLength, chainLength);
//
// cudaEventRecord(endEvent, 0);
// cudaEventSynchronize(endEvent);
// cudaEventElapsedTime(&cudaElapsedTime, startEvent, endEvent);
//
// thrust::device_ptr<struct Chain> thrustChainP(devicePointer);
// thrust::sort(thrustChainP, thrustChainP + CHAINS_SIZE, ChainComparator());
//
// CUDA_CALL(cudaMemcpy(hostPointer, devicePointer, CHAINS_SIZE * sizeof(struct Chain), cudaMemcpyDeviceToHost));
//
// // plainLength#charSet#table#tableLength#chainLength
//
// // 1-3#ascii-32-95#1#0#chainLength
// //writeToFile((string("../") + "1-3#" + "ascii-32-95#" + "1#" + "0#" + ).c_str(), hostPointer, sizeof(struct Chain), CHAINS_SIZE);
// writeToFile("../t5.rt", hostPointer, sizeof(struct Chain), CHAINS_SIZE);
//
//
// cudaFreeHost(hostPointer);
// cudaFreeHost(hostCharSet);
// cudaFree(deviceCharSet);
// cudaFree(devicePointer);
// //cudaEventDestroy(startEvent);
// //cudaEventDestroy(endEvent);
//
// cudaDeviceReset();
//
// printf("%.3lf MH/S", (CHAINS_SIZE * (ulong)chainLength) / (cudaElapsedTime * 1000.0));
//
// getchar();
//
//
//
// return 0;
//}
|
3b21e60edbea1f322c15774f2b3e808b347d393a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////
// rmta_gpu.cu
//
// Provides functions for microarray analysis
// using random matrix theory
// on the GPU.
//
// Author: Yun Zhang
// Date Created: November 29, 2011
// Last Modified: April 18, 2012
////////////////////////////////////////////////////////////
//include necessary files
#include "config.h"
#include <cublas_eig.cu>
#include <varimax.cu>
//include necessary external functions
extern int culaSstein(float *eigenvalues, float *eigenvectors,
float *matrix, int n,float vl);
extern int checkStatus2(culaStatus status);
////////////////////////////////////////////////////////////
// Computes the k largest eigenpairs of a symmetric n x n matrix with
// culaSsyevx and stores them largest-first.
//
// @param eigenvalues in: eigenvalues used only to form vl/vu;
//                    out: entries [0, m) overwritten, descending
// @param eigenvectors out: k eigenvectors (n floats each, largest
//                    first); rows k..n-1 are zero-filled
// @param matrix      symmetric input (overwritten by the solver)
// @param k           number of eigenpairs wanted
// @param n           matrix order
// @return            result of checkStatus2 on the culaSsyevx status
////////////////////////////////////////////////////////////
int culaSstein2(float *eigenvalues, float *eigenvectors, float *matrix, int k, int n)
{
int *ifail = new int[n];
culaStatus status;
// NOTE(review): with range mode 'I' (index selection below) LAPACK-style
// solvers ignore vl/vu -- these two bounds look like leftovers; confirm.
float vl = eigenvalues[n-k]+1;
float vu = eigenvalues[n-1]+0.1;
float abstol=0.0;
int m = 0;
float* w = new float[n];
float* z = new float[n*n];
// 'V' = compute vectors, 'I' = select eigenpairs n-k+1..n (the k
// largest), 'L' = lower triangle stored.
status = culaSsyevx('V', 'I', 'L', n, matrix, n,vl, vu, n-k+1, n, abstol, &m, w, z, n, ifail);
int result =checkStatus2(status);
int count = 0;
// The solver returns eigenpairs in ascending order; copy the vectors
// out reversed so slot 0 holds the largest (assumes m == k -- confirm).
for(int i = k-1; i >= 0; i--)
{
for(int j = 0; j < n; j++)
{
eigenvectors[count*n+j] = z[i*n+j];
}
count++;
}
//float temp;
// Reverse the eigenvalues as well so they match the vector ordering.
for(int i=0; i< m; i++){
printf("%f ", w[i]);  // NOTE(review): debug output left in -- remove?
eigenvalues[i] = w[m-1-i];
}
// Zero the unused eigenvector rows.
for(int i = k; i < n; i++)
{
for(int j = 0; j < n; j++)
{
eigenvectors[i*n+j] = 0.0;
}
}
delete[] ifail;
delete[] w;
delete[] z;
return result;
}
////////////////////////////////////////////////////////////
//
// Kernel function. Compute row averages of g_data.
//
// @param g_data input dataset
// @param average contains computed row averages
// @param rows # of rows of g_data
// @param cols # of columns of g_data
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel. One thread per row: computes the arithmetic mean of that row
// of g_data and stores it in average[row].
//
// @param g_data  row-major rows x cols input
// @param average output vector of length rows
// @param rows    number of rows
// @param cols    number of columns
////////////////////////////////////////////////////////////
__global__ void GetAverages(float* g_data, float* average, int rows, int cols)
{
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= rows) {
		return;
	}
	float sum = 0.0f;
	const float* rowPtr = g_data + row * cols;
	for (int j = 0; j < cols; ++j) {
		sum += rowPtr[j];
	}
	average[row] = sum / cols;
}
////////////////////////////////////////////////////////////
//
// Kernel function. Compute row standard deviations
// of g_data.
//
// @param g_data input dataset
// @param average contains computed row averages
// @param stdev contains computed standard deviations
// @param rows # of rows of g_data
// @param cols # of columns of g_data
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel. One thread per row: computes the sample standard deviation of
// that row of g_data (divisor cols-1) into stdev[row].  Rows with zero
// deviation are clamped to 1 so later divisions by stdev are safe.
//
// Fixes: fabs -> fabsf (the double overload forced a silent
// float->double round trip); powf(x, 2) replaced by a single multiply;
// loop-invariant average load hoisted out of the loop.
//
// @param g_data  row-major rows x cols input
// @param average precomputed row means
// @param stdev   output vector of length rows
// @param rows    number of rows
// @param cols    number of columns
////////////////////////////////////////////////////////////
__global__ void GetStdev(float* g_data, float* average, float* stdev, int rows, int cols)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < rows)
	{
		float avg = average[idx];
		float var = 0.0f;
		for(int j = 0; j < cols; j++)
		{
			float diff = g_data[idx*cols+j] - avg;
			var += diff * diff;
		}
		// argument is non-negative, fabsf kept for parity with original
		stdev[idx] = sqrtf(fabsf(var/(cols-1)));
		if(stdev[idx] == 0) stdev[idx] = 1;
	}
}
////////////////////////////////////////////////////////////
//
// Compute Pearson correlation matrix of g_data.
//
// @param g_data input dataset
// @param average contains computed row averages
// @param stdev contains computed standard deviations
// @param pearson contains Pearson correlation matrix
// @param rows # of rows of g_data
// @param cols # of columns of g_data
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel. One thread per row pair (x, y) with x <= y: computes the
// Pearson correlation between rows x and y of g_data and writes it to
// both symmetric entries of the output matrix.
//
// @param g_data  row-major rows x cols input
// @param average precomputed row means
// @param stdev   precomputed row standard deviations
// @param pearson output rows x rows correlation matrix
// @param rows    number of rows
// @param cols    number of columns
////////////////////////////////////////////////////////////
__global__ void ComputeCorrelation(float* g_data, float* average, float* stdev, float* pearson, int rows, int cols)
{
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	if (x >= rows || y >= rows || x > y) {
		return;
	}
	float avgX = average[x];
	float avgY = average[y];
	float cov = 0.0f;
	for (int j = 0; j < cols; ++j) {
		cov += (g_data[x*cols+j] - avgX) * (g_data[y*cols+j] - avgY);
	}
	float pear = cov / ((cols-1) * stdev[x] * stdev[y]);
	pearson[x*rows+y] = pear;
	pearson[y*rows+x] = pear;
}
////////////////////////////////////////////////////////////
//
// Kernel function. Transforms eigenvectors to component
// loadings.
//
// @param eigenvectors eigenvectors to be transformed
// @param eigenvalues corresponding eigenvalues
// @param rows # of rows of eigenvectors
// @param cols # of columns of eigenvectors
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel. Scales eigenvectors into component loadings: element
// (idx, idy) of the row-major matrix is multiplied by
// sqrt(|eigenvalues[idy]|), i.e. each column idy belongs to eigenvalue idy.
//
// @param eigenvectors rows x rows matrix, transformed in place
// @param eigenvalues  one eigenvalue per column
// @param rows         number of rows (both guards use this)
// @param cols         unused -- NOTE(review): both bounds tests check
//                     `rows`, so only square inputs are handled; all
//                     visible callers pass (rows, rows), but confirm
//                     this is intended before reusing the kernel.
////////////////////////////////////////////////////////////
__global__ void Transform(float *eigenvectors, float *eigenvalues, int rows, int cols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < rows && idy < rows)
{
eigenvectors[idx*rows+idy] = eigenvectors[idx*rows+idy] * sqrtf(fabs(eigenvalues[idy]));
}
}
////////////////////////////////////////////////////////////
//
// Kernel function. Transposes matrix in into matrix out.
// Modified from CUDA SDK Code samples.
//
// @param in input matrix
// @param out output transposed matrix in
// @param rows # of rows of in and out
// @param cols # of columns of in and out
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel. Tiled transpose of `in` (rows x cols) into `out`
// (cols x rows).  Modified from the CUDA SDK sample; expects a
// TWOD_THREADS x TWOD_THREADS thread block.
//
// Fix: the shared tile now carries +1 column of padding so the
// transposed read (block[threadIdx.x][threadIdx.y]) no longer causes
// shared-memory bank conflicts.  Output is unchanged.
//
// @param in   input matrix, row-major rows x cols
// @param out  output matrix, row-major cols x rows
// @param rows # of rows of in
// @param cols # of columns of in
////////////////////////////////////////////////////////////
__global__ void Transpose(float *in, float *out, int rows, int cols)
{
	__shared__ float block[TWOD_THREADS][TWOD_THREADS + 1];
	// read the matrix tile into shared memory
	int xIndex = blockIdx.x * TWOD_THREADS + threadIdx.x;
	int yIndex = blockIdx.y * TWOD_THREADS + threadIdx.y;
	if((xIndex < cols) && (yIndex < rows))
	{
		int index_in = yIndex * cols + xIndex;
		block[threadIdx.y][threadIdx.x] = in[index_in];
	}
	// every thread must finish writing the tile before any thread reads it
	__syncthreads();
	// write the transposed matrix tile to global memory
	xIndex = blockIdx.y * TWOD_THREADS + threadIdx.x;
	yIndex = blockIdx.x * TWOD_THREADS + threadIdx.y;
	if((xIndex < rows) && (yIndex < cols))
	{
		int index_out = yIndex * rows + xIndex;
		out[index_out] = block[threadIdx.x][threadIdx.y];
	}
}
////////////////////////////////////////////////////////////
//
// Kernel function. Get diagonal elements of matrix M.
//
// @param M input matrix
// @param diags output, diagonal elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel. One thread per index i: copies M[i][i] into diags[i].
//
// @param M     n x n input matrix (row-major)
// @param diags output vector of length n
// @param n     matrix order
////////////////////////////////////////////////////////////
__global__ void GetDiagonals(float *M, float *diags, int n)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) {
		return;
	}
	diags[i] = M[i * (n + 1)]; // element (i, i) of the row-major matrix
}
////////////////////////////////////////////////////////////
//
// Kernel function. Get superdiagonals of matrix M
//
// @param M input matrix
// @param sd output, superdiagonal elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel. Extracts the n-1 off-diagonal elements M[i][i-1]
// (i = 1..n-1) into sd[0..n-2].
// NOTE(review): in row-major layout M[i*n + (i-1)] is the
// SUB-diagonal; the "super" in the name only holds if M is symmetric
// or stored column-major -- confirm with callers.
//
// @param M  n x n input matrix
// @param sd output vector of length n-1
// @param n  matrix order
////////////////////////////////////////////////////////////
__global__ void GetSuperDiagonals(float *M, float *sd, int n)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i > 0 && i < n) {
		sd[i - 1] = M[i * n + i - 1];
	}
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function. Computes Pearson correlation
// matrix of data.
//
// @param data input data
// @param pearson outputted Pearson matrix
// @param rows # of rows of data, # of rows and
// columns of pearson
// @param cols # of columns of data
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel wrapper. Computes the Pearson correlation matrix of `data` on
// the device: row means, row standard deviations, then all pairwise
// correlations.
//
// Fix: removed the two hipDeviceSynchronize calls between the kernel
// launches -- all three kernels run on the same (default) stream, so
// they already execute in order; the syncs only stalled the host.
//
// @param data    device input, row-major rows x cols
// @param pearson device output, rows x rows
// @param rows    # of rows of data (order of pearson)
// @param cols    # of columns of data
////////////////////////////////////////////////////////////
void gpu_pearson(float *data, float *pearson, int rows, int cols)
{
	dim3 dimGrid(1,1,1);
	dim3 dimBlock(1,1,1);
	float *average;
	float *stdev;
	// temporary per-row statistics
	hipMalloc((void**)&average, rows*sizeof(float));
	hipMemset(average, 0, rows*sizeof(float));
	hipMalloc((void**)&stdev, rows*sizeof(float));
	hipMemset(stdev, 0, rows*sizeof(float));
	hipMemset(pearson, 0, rows*rows*sizeof(float));
	// 2D launch sized for the rows x rows correlation kernel; the 1D
	// statistics kernels only use the x dimension.
	dimBlock.x = TWOD_THREADS;
	dimBlock.y = TWOD_THREADS;
	dimGrid.x = (rows+dimBlock.x-1) / dimBlock.x;
	dimGrid.y = dimGrid.x;
	hipLaunchKernelGGL(( GetAverages), dim3(dimGrid),dim3(dimBlock), 0, 0, data, average, rows, cols);
	hipLaunchKernelGGL(( GetStdev), dim3(dimGrid),dim3(dimBlock), 0, 0, data, average, stdev, rows, cols);
	hipLaunchKernelGGL(( ComputeCorrelation), dim3(dimGrid),dim3(dimBlock), 0, 0, data,average,stdev,
		pearson, rows, cols);
	// release the temporaries; hipFree does not return device memory to
	// the pool while queued work still references it
	hipFree(average);
	hipFree(stdev);
	average = NULL;
	stdev = NULL;
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function. Transforms eigenvectors to
// component loadings.
//
// @param eigenvectors eigenvectors to be transformed
// @param eigenvalues corresponding eigenvalues
// @param rows # of rows of eigenvectors
// @param cols # of columns of eigenvectors
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel wrapper. Scales eigenvectors into component loadings via the
// Transform kernel (each column multiplied by sqrt(|eigenvalue|)).
//
// @param eigenvectors device matrix, transformed in place
// @param eigenvalues  device eigenvalue vector
// @param rows         # of rows of eigenvectors
// @param cols         # of columns of eigenvectors
////////////////////////////////////////////////////////////
void gpu_transform_eigenvectors(float *eigenvectors, float *eigenvalues, int rows, int cols)
{
	const unsigned int side = (rows + TWOD_THREADS - 1) / TWOD_THREADS;
	dim3 dimBlock(TWOD_THREADS, TWOD_THREADS, 1);
	dim3 dimGrid(side, side, 1);
	hipLaunchKernelGGL(( Transform), dim3(dimGrid), dim3(dimBlock) , 0, 0, eigenvectors, eigenvalues, rows, cols);
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function. Transposes matrix in
// into matrix out.
//
// @param in input matrix
// @param out output transposed matrix in
// @param rows # of rows of in and out
// @param cols # of columns of in and out
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel wrapper. Transposes `in` (rows x cols) into `out`.
//
// Fix: both grid dimensions were previously derived from `rows`, which
// mis-covers non-square matrices; the Transpose kernel tiles cols along
// x and rows along y.  All current callers pass square matrices, so
// this change is behavior-compatible for them.
//
// @param in   device input matrix
// @param out  device output matrix
// @param rows # of rows of in
// @param cols # of columns of in
////////////////////////////////////////////////////////////
void gpu_transpose(float *in, float *out, int rows, int cols)
{
	dim3 dimBlock(TWOD_THREADS, TWOD_THREADS, 1);
	dim3 dimGrid((cols + TWOD_THREADS - 1) / TWOD_THREADS,
	             (rows + TWOD_THREADS - 1) / TWOD_THREADS, 1);
	hipLaunchKernelGGL(( Transpose), dim3(dimGrid), dim3(dimBlock) , 0, 0, in, out, rows, cols);
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function.
// Get diagonal elements of matrix M.
//
// @param M input matrix
// @param diags output, diagonal elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel wrapper. Copies the diagonal of the device matrix M into the
// device vector `diagonals`.
//
// @param M         n x n device matrix
// @param diagonals device output, length n
// @param n         matrix order
////////////////////////////////////////////////////////////
void gpu_get_diagonals(float *M, float *diagonals, int n)
{
	dim3 dimBlock(ONED_THREADS, 1, 1);
	dim3 dimGrid((n + ONED_THREADS - 1) / ONED_THREADS, 1, 1); // ceil-div
	hipLaunchKernelGGL(( GetDiagonals), dim3(dimGrid), dim3(dimBlock) , 0, 0, M, diagonals, n);
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function.
// Get superdiagonal elements of matrix M.
//
// @param M input matrix
// @param supdiags output, superdiag. elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Kernel wrapper. Copies the off-diagonal elements M[i][i-1] of the
// device matrix M into the device vector `supdiags`.
//
// @param M        n x n device matrix
// @param supdiags device output, length n-1
// @param n        matrix order
////////////////////////////////////////////////////////////
void gpu_get_superdiagonals(float *M, float *supdiags, int n)
{
	dim3 block(ONED_THREADS, 1, 1);
	dim3 grid((n + ONED_THREADS - 1) / ONED_THREADS, 1, 1); // ceil-div
	hipLaunchKernelGGL(( GetSuperDiagonals), dim3(grid), dim3(block) , 0, 0, M, supdiags, n);
}
////////////////////////////////////////////////////////////
//
// Prints current free memory on the GPU.
//
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Prints the current amount of free GPU memory.
//
// Fix: the old code called the CUDA driver's cuMemGetInfo (a hipify
// leftover in this HIP file) with uint* arguments where the API writes
// size_t values -- stack corruption on 64-bit builds.  Use the HIP
// runtime query with correctly sized operands instead.
////////////////////////////////////////////////////////////
void print_mem()
{
	size_t freeBytes = 0;
	size_t totalBytes = 0;
	hipMemGetInfo(&freeBytes, &totalBytes);
	printf("\t\t Current Free GPU Memory: %zu MB\n", (size_t)(freeBytes >> 20));
}
////////////////////////////////////////////////////////////
//
// Entry point for GPU RMT algorithm.
//
// @param data loaded dataset
// @param random loaded random matrix
// @param pearson_out data Pearson matrix (output)
// @param pear_in true if data is Pearson matrix
// @param curr_step current step in algorithm
// @param cols # of columns of data
// @param rows # of rows of data
////////////////////////////////////////////////////////////
// Orchestrates the full RMT pipeline: Pearson matrices for the random
// and data inputs, eigen-decomposition via CULA, eigenvector
// back-transform, loading transform and Varimax rotation.  Returns the
// number of meaningful components, or EXIT_FAILURE.
extern "C" int runRMT(float *data, float *random, float *pearson_out, float *rotations, bool pear_in, int curr_step, int cols, int rows)
{
//set device
hipSetDevice(cutGetMaxGflopsDeviceId());
cublasStatus stat;
culaStatus status;
status = culaInitialize();
checkStatus2(status);
//declare variables
float *g_data;
float *g_workm;
float *g_workv;
float *g_transform;
float *g_transpose;
float *cd_eigen = new float[rows];   // data-matrix eigenvalues
float *cr_eigen = new float[rows];   // random-matrix eigenvalues
float *c_diags = new float[rows];
float *c_superdiags = new float[rows-1];
int meaningful;                      // # of meaningful components found
const int data_mem_size = sizeof(float) * rows * cols;
const int pearson_mem_size = sizeof(float) * rows * rows;
const int rows_mem_size = sizeof(float) * rows;
int return_value;
//allocate device memory
hipMalloc((void**)&g_data, data_mem_size);
hipMemset(g_data, 0, data_mem_size);
hipMalloc((void**)&g_workm, pearson_mem_size);
hipMemset(g_workm, 0, pearson_mem_size);
hipMalloc((void**)&g_workv, rows_mem_size);
// NOTE(review): this memset zeroes g_workm (again) with g_workv's size;
// it was almost certainly meant to zero the freshly allocated g_workv.
hipMemset(g_workm, 0, rows_mem_size);
hipMalloc((void**)&g_transform, pearson_mem_size);
hipMemset(g_transform, 0, pearson_mem_size);
hipMalloc((void**)&g_transpose, pearson_mem_size);
hipMemset(g_transpose, 0, pearson_mem_size);
if(rows >= LARGE_MATRIX) print_mem();
// Build (or load) the Pearson matrix of the RANDOM input into g_workm.
if(!pear_in)
{
// copy host memory to device
hipMemcpy(g_data, random, data_mem_size, hipMemcpyHostToDevice);
printf("(On GPU) Step %i: Calculating Pearson correlation coefficients for random matrix...", curr_step);
fflush(stdout);
gpu_pearson(g_data, g_workm, rows, cols);
printf("Done.\n");
fflush(stdout);
curr_step++;
}
else
{
hipMemcpy(g_workm, random, pearson_mem_size, hipMemcpyHostToDevice);
}
if(rows >= LARGE_MATRIX) print_mem();
//printf("(On GPU)\t Reducing random matrix to tridiagonal form...");
//fflush(stdout);
stat = hipblasInit();
if(stat != HIPBLAS_STATUS_SUCCESS) return EXIT_FAILURE;
printf("(On CPU) Step %i: Computing eigenvalues for Random matrice...", curr_step);
fflush(stdout);
// NOTE(review): pearson_mem_size is a BYTE count, so this allocates 4x
// the floats actually needed; `matrix` is also never deleted (leak).
float *matrix = new float[pearson_mem_size];
/*
* compute eigenvalues for the random matrix
*/
gpu_transpose(g_workm, g_transpose, rows, rows);
hipMemcpy(matrix, g_transpose, pearson_mem_size, hipMemcpyDeviceToHost);
culaSsyev('N', 'L', rows, matrix, rows, cr_eigen);
// NOTE(review): `status` still holds culaInitialize's result -- the
// culaSsyev return value above is discarded, so this check (and the
// EXIT_FAILURE branch below) cannot observe an eigensolver failure.
return_value = checkStatus2(status);
gpu_transpose(g_transpose, g_workm, rows, rows);
printf("Done.\n");
fflush(stdout);
if(return_value == EXIT_FAILURE)
{
// NOTE(review): g_workv, g_transpose and `matrix` are not released on
// this early-exit path.
printf("\nOut of memory!!! Try running again...\n");
fflush(stdout);
hipFree(g_workm);
g_workm = NULL;
hipFree(g_transform);
g_transform = NULL;
hipFree(g_data);
g_data = NULL;
delete[] cd_eigen;
delete[] cr_eigen;
delete[] c_diags;
delete[] c_superdiags;
hipblasShutdown();
hipDeviceReset();
return EXIT_FAILURE;
}
// Build (or load) the Pearson matrix of the DATA input into g_workm.
if(!pear_in)
{
// NOTE(review): g_data was allocated with data_mem_size; when
// rows*rows > rows*cols this memset writes past the allocation.
hipMemset(g_data, 0, pearson_mem_size);
hipMemcpy(g_data, data, data_mem_size, hipMemcpyHostToDevice);
printf("(On GPU)\t Calculating Pearson correlation coefficients for data matrix...", curr_step);
fflush(stdout);
gpu_pearson(g_data, g_workm, rows, cols);
//free unneeded memory
hipFree(g_data);
g_data = NULL;
hipMemcpy(pearson_out, g_workm, pearson_mem_size, hipMemcpyDeviceToHost);
printf("Done.\n");
fflush(stdout);
}
else
{
//free unneeded memory
hipFree(g_data);
g_data = NULL;
// NOTE(review): in this branch `data` is already a Pearson matrix
// (rows x rows), yet both copies below use data_mem_size
// (rows x cols bytes) -- confirm cols == rows on this path.
memcpy(pearson_out, data, data_mem_size);
hipMemcpy(g_workm, data, data_mem_size, hipMemcpyHostToDevice);
}
if(rows >= LARGE_MATRIX) print_mem();
printf("Done.\n");
fflush(stdout);
/*
* compute eigenvalues for the data matrix
*/
printf("(On CPU) Step %i: Computing eigenvalues for data matrice...", curr_step);
fflush(stdout);
// NOTE(review): pw is allocated but never used or freed.
float *pw = new float[rows];
/*
* compute eigenvalues for the data matrix
*/
gpu_transpose(g_workm, g_transpose, rows, rows);
hipMemcpy(matrix, g_transpose, pearson_mem_size, hipMemcpyDeviceToHost);
culaSsyev('N', 'L', rows, matrix, rows, cd_eigen);
checkStatus2(status);
gpu_transpose(g_transpose, g_workm, rows, rows);
printf("Done.\n");
fflush(stdout);
// NOTE(review): return_value was last assigned after the RANDOM-matrix
// solve; this test re-checks that stale value, not the data solve.
if(return_value == EXIT_FAILURE)
{
printf("\nOut of memory!!! Try running again...\n");
fflush(stdout);
hipFree(g_workm);
g_workm = NULL;
hipFree(g_transform);
g_transform = NULL;
delete[] cd_eigen;
delete[] cr_eigen;
delete[] c_diags;
delete[] c_superdiags;
hipblasShutdown();
hipDeviceReset();
return EXIT_FAILURE;
}
// Compare the largest data/random eigenvalues to decide how many
// components rise above the random background.
printf("\t\t Data Eigenvalues\tRandom Eigenvalues (Last 5)\n");
for(int i = rows-4; i < rows; i++) printf("\t\t %f\t\t%f\n", cd_eigen[i], cr_eigen[i]);
if(pear_in||cd_eigen[rows-2]<cr_eigen[rows-1])
{
if(rows < K)
meaningful = rows;
else
meaningful = K;
printf("(On CPU)\t Computing tridiagonal eigenvectors for the K largest eigenvalues (K = %i)...", meaningful);
fflush(stdout);
culaSstein2(cd_eigen, rotations, matrix, meaningful,rows);
}else{
// NOTE(review): the message prints cr_eigen[4] but the threshold
// actually passed to culaSstein below is cr_eigen[rows-1].
printf("(On CPU)\t Computing tridiagonal eigenvectors for the eigenvalues larger than the largest element in random %f ", cr_eigen[4]);
fflush(stdout);
meaningful = culaSstein(cd_eigen, rotations, matrix, rows, cr_eigen[rows-1]);
}
checkStatus2(status);
hipMemcpy(g_transpose, rotations, pearson_mem_size, hipMemcpyHostToDevice);
printf("Done.\n");
fflush(stdout);
printf("(On GPU)\t Backtransforming tridiagonal eigenvectors to those of the original matrix...");
fflush(stdout);
return_value = cublasSsormtr(g_transpose, g_transform, meaningful, rows);
gpu_transpose(g_transpose, g_workm, rows, rows);
printf("Done.\n");
fflush(stdout);
//transform eigenvectors to loadings
printf("(On GPU) Step %i: Transforming eigenvectors to component loadings...", curr_step);
fflush(stdout);
hipFree(g_transform);
g_transform = NULL;
hipMemcpy(g_workv, cd_eigen, meaningful*sizeof(float), hipMemcpyHostToDevice);
gpu_transform_eigenvectors(g_workm, g_workv, rows, rows);
printf("Done.\n");
fflush(stdout);
curr_step++;
hipFree(g_workv);
g_workv = NULL;
if(rows >= LARGE_MATRIX) print_mem();
//rotate loadings using varimax
printf("(On GPU) Step %i: Orthogonal rotation using Varimax...", curr_step);
fflush(stdout);
gpu_transpose(g_workm, g_transpose, rows, rows);
gpu_varimax(g_transpose, rows, meaningful);
gpu_transpose(g_transpose, g_workm, rows, rows);
printf("Done.\n\n");
fflush(stdout);
//copy results from device to host
hipMemcpy(rotations, g_workm, pearson_mem_size, hipMemcpyDeviceToHost);
// cleanup memory
// NOTE(review): g_transform was already freed and nulled above, so the
// second free below is a no-op; g_transpose, `matrix` and `pw` are
// never released here -- leaks.
hipFree(g_workm);
g_workm = NULL;
hipFree(g_transform);
g_transform = NULL;
delete[] cd_eigen;
delete[] cr_eigen;
delete[] c_diags;
delete[] c_superdiags;
hipblasShutdown();
checkStatus2(status);
culaShutdown();
hipDeviceReset();
return meaningful;
}
//end rmta_gpu.cu
| 3b21e60edbea1f322c15774f2b3e808b347d393a.cu | ////////////////////////////////////////////////////////////
// rmta_gpu.cu
//
// Provides functions for microarray analysis
// using random matrix theory
// on the GPU.
//
// Author: Yun Zhang
// Date Created: November 29, 2011
// Last Modified: April 18, 2012
////////////////////////////////////////////////////////////
//include necessary files
#include "config.h"
#include <cublas_eig.cu>
#include <varimax.cu>
//include necessary external functions
extern int culaSstein(float *eigenvalues, float *eigenvectors,
float *matrix, int n,float vl);
extern int checkStatus2(culaStatus status);
////////////////////////////////////////////////////////////
// Computes the k largest eigenpairs of a symmetric n x n matrix with
// culaSsyevx and stores them largest-first.
//
// @param eigenvalues in: eigenvalues used only to form vl/vu;
//                    out: entries [0, m) overwritten, descending
// @param eigenvectors out: k eigenvectors (n floats each, largest
//                    first); rows k..n-1 are zero-filled
// @param matrix      symmetric input (overwritten by the solver)
// @param k           number of eigenpairs wanted
// @param n           matrix order
// @return            result of checkStatus2 on the culaSsyevx status
////////////////////////////////////////////////////////////
int culaSstein2(float *eigenvalues, float *eigenvectors, float *matrix, int k, int n)
{
int *ifail = new int[n];
culaStatus status;
// NOTE(review): with range mode 'I' (index selection below) LAPACK-style
// solvers ignore vl/vu -- these two bounds look like leftovers; confirm.
float vl = eigenvalues[n-k]+1;
float vu = eigenvalues[n-1]+0.1;
float abstol=0.0;
int m = 0;
float* w = new float[n];
float* z = new float[n*n];
// 'V' = compute vectors, 'I' = select eigenpairs n-k+1..n (the k
// largest), 'L' = lower triangle stored.
status = culaSsyevx('V', 'I', 'L', n, matrix, n,vl, vu, n-k+1, n, abstol, &m, w, z, n, ifail);
int result =checkStatus2(status);
int count = 0;
// The solver returns eigenpairs in ascending order; copy the vectors
// out reversed so slot 0 holds the largest (assumes m == k -- confirm).
for(int i = k-1; i >= 0; i--)
{
for(int j = 0; j < n; j++)
{
eigenvectors[count*n+j] = z[i*n+j];
}
count++;
}
//float temp;
// Reverse the eigenvalues as well so they match the vector ordering.
for(int i=0; i< m; i++){
printf("%f ", w[i]);  // NOTE(review): debug output left in -- remove?
eigenvalues[i] = w[m-1-i];
}
// Zero the unused eigenvector rows.
for(int i = k; i < n; i++)
{
for(int j = 0; j < n; j++)
{
eigenvectors[i*n+j] = 0.0;
}
}
delete[] ifail;
delete[] w;
delete[] z;
return result;
}
////////////////////////////////////////////////////////////
//
// Kernel function. Compute row averages of g_data.
// One thread handles one row.
//
// @param g_data input dataset (rows x cols, row-major)
// @param average contains computed row averages
// @param rows # of rows of g_data
// @param cols # of columns of g_data
////////////////////////////////////////////////////////////
__global__ void GetAverages(float* g_data, float* average, int rows, int cols)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if(row >= rows)
        return;
    float acc = 0.0f;
    for(int c = 0; c < cols; c++)
    {
        acc += g_data[row * cols + c];
    }
    average[row] = acc / cols;
}
////////////////////////////////////////////////////////////
//
// Kernel function. Compute row standard deviations
// of g_data. One thread handles one row.
//
// @param g_data input dataset (rows x cols, row-major)
// @param average previously computed row averages
// @param stdev contains computed standard deviations
//              (zero stdev is replaced by 1 to guard
//               later divisions)
// @param rows # of rows of g_data
// @param cols # of columns of g_data
////////////////////////////////////////////////////////////
__global__ void GetStdev(float* g_data, float* average, float* stdev, int rows, int cols)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < rows)
    {
        float mean = average[idx];
        float ss = 0.0f;
        for(int j=0;j<cols;j++)
        {
            // d*d is cheaper and more accurate than powf(d, 2)
            float d = g_data[idx*cols+j] - mean;
            ss += d * d;
        }
        // fabsf: single-precision overload (fabs promotes to double)
        float s = sqrtf(fabsf(ss/(cols-1)));
        stdev[idx] = (s == 0.0f) ? 1.0f : s;
    }
}
////////////////////////////////////////////////////////////
//
// Compute Pearson correlation matrix of g_data.
// Thread (r1, r2) handles one pair of rows with r1 <= r2;
// both symmetric entries are written.
//
// @param g_data input dataset
// @param average contains computed row averages
// @param stdev contains computed standard deviations
// @param pearson contains Pearson correlation matrix
// @param rows # of rows of g_data
// @param cols # of columns of g_data
////////////////////////////////////////////////////////////
__global__ void ComputeCorrelation(float* g_data, float* average, float* stdev, float* pearson, int rows, int cols)
{
    const int r1 = blockIdx.x * blockDim.x + threadIdx.x;
    const int r2 = blockIdx.y * blockDim.y + threadIdx.y;
    if(r1 >= rows || r2 >= rows || r1 > r2)
        return;
    // covariance accumulation over the two rows
    float acc = 0.0;
    for(int c = 0; c < cols; c++)
    {
        acc += (g_data[r1*cols+c] - average[r1]) * (g_data[r2*cols+c] - average[r2]);
    }
    acc = acc / ((cols-1) * stdev[r1] * stdev[r2]);
    // the matrix is symmetric: fill both halves
    pearson[(r1*rows)+r2] = acc;
    pearson[(r2*rows)+r1] = acc;
}
////////////////////////////////////////////////////////////
//
// Kernel function. Transforms eigenvectors to component
// loadings by scaling column idy by sqrt(|lambda_idy|).
//
// @param eigenvectors eigenvectors to be transformed
// @param eigenvalues corresponding eigenvalues
// @param rows # of rows of eigenvectors
// @param cols # of columns of eigenvectors (unused here;
//             the matrix is treated as rows x rows)
////////////////////////////////////////////////////////////
__global__ void Transform(float *eigenvectors, float *eigenvalues, int rows, int cols)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if(idx < rows && idy < rows)
    {
        // fabsf: single-precision overload (fabs promotes to double)
        eigenvectors[idx*rows+idy] = eigenvectors[idx*rows+idy] * sqrtf(fabsf(eigenvalues[idy]));
    }
}
////////////////////////////////////////////////////////////
//
// Kernel function. Transposes matrix in into matrix out.
// Modified from CUDA SDK Code samples.
// Expects a TWOD_THREADS x TWOD_THREADS thread block.
//
// @param in input matrix (rows x cols)
// @param out output transposed matrix (cols x rows)
// @param rows # of rows of in
// @param cols # of columns of in
////////////////////////////////////////////////////////////
__global__ void Transpose(float *in, float *out, int rows, int cols)
{
    // +1 column of padding removes shared-memory bank conflicts on the
    // transposed (column-wise) read below
    __shared__ float block[TWOD_THREADS][TWOD_THREADS + 1];
    // read the matrix tile into shared memory
    int xIndex = blockIdx.x * TWOD_THREADS + threadIdx.x;
    int yIndex = blockIdx.y * TWOD_THREADS + threadIdx.y;
    if((xIndex < cols) && (yIndex < rows))
    {
        int index_in = yIndex * cols + xIndex;
        block[threadIdx.y][threadIdx.x] = in[index_in];
    }
    __syncthreads();
    // write the transposed matrix tile to global memory
    xIndex = blockIdx.y * TWOD_THREADS + threadIdx.x;
    yIndex = blockIdx.x * TWOD_THREADS + threadIdx.y;
    if((xIndex < rows) && (yIndex < cols))
    {
        int index_out = yIndex * rows + xIndex;
        out[index_out] = block[threadIdx.x][threadIdx.y];
    }
}
////////////////////////////////////////////////////////////
//
// Kernel function. Get diagonal elements of matrix M.
//
// @param M input matrix (n x n)
// @param diags output, diagonal elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
__global__ void GetDiagonals(float *M, float *diags, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i >= n)
        return;
    // M[i*n + i] written as a single multiply
    diags[i] = M[i * (n + 1)];
}
////////////////////////////////////////////////////////////
//
// Kernel function. Get superdiagonals of matrix M.
//
// @param M input matrix (n x n)
// @param sd output, the n-1 superdiagonal elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
__global__ void GetSuperDiagonals(float *M, float *sd, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // thread 0 has no element to its left; threads >= n are out of range
    if(i <= 0 || i >= n)
        return;
    sd[i-1] = M[i*n + (i-1)];
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function. Computes Pearson correlation
// matrix of data (all pointers are device pointers).
//
// @param data input data
// @param pearson outputted Pearson matrix
// @param rows # of rows of data, # of rows and
//             columns of pearson
// @param cols # of columns of data
////////////////////////////////////////////////////////////
void gpu_pearson(float *data, float *pearson, int rows, int cols)
{
    //setup execution parameters
    dim3 dimGrid(1,1,1);
    dim3 dimBlock(1,1,1);
    float *average;
    float *stdev;
    //allocate and zero scratch vectors for row means / stdevs
    cudaMalloc((void**)&average, rows*sizeof(float));
    cudaMemset(average, 0, rows*sizeof(float));
    cudaMalloc((void**)&stdev, rows*sizeof(float));
    cudaMemset(stdev, 0, rows*sizeof(float));
    cudaMemset(pearson, 0, rows*rows*sizeof(float));
    //compute number of blocks and number of threads
    dimBlock.x = TWOD_THREADS;
    dimBlock.y = TWOD_THREADS;
    dimGrid.x = (rows+dimBlock.x-1) / dimBlock.x;
    dimGrid.y = dimGrid.x;
    //compute Pearson correlations; each stage consumes the previous one
    GetAverages<<<dimGrid,dimBlock>>>(data, average, rows, cols);
    // cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize
    cudaDeviceSynchronize();
    GetStdev<<<dimGrid,dimBlock>>>(data, average, stdev, rows, cols);
    cudaDeviceSynchronize();
    ComputeCorrelation<<<dimGrid,dimBlock>>>(data,average,stdev,
                                             pearson, rows, cols);
    //release memory
    cudaFree(average);
    cudaFree(stdev);
    average = NULL;
    stdev = NULL;
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function. Transforms eigenvectors to
// component loadings.
//
// @param eigenvectors eigenvectors to be transformed
// @param eigenvalues corresponding eigenvalues
// @param rows # of rows of eigenvectors
// @param cols # of columns of eigenvectors
////////////////////////////////////////////////////////////
void gpu_transform_eigenvectors(float *eigenvectors, float *eigenvalues, int rows, int cols)
{
    // square 2D launch covering a rows x rows grid of entries
    const unsigned int nblocks = (rows + TWOD_THREADS - 1) / TWOD_THREADS;
    dim3 block(TWOD_THREADS, TWOD_THREADS, 1);
    dim3 grid(nblocks, nblocks, 1);
    Transform<<< grid, block >>>(eigenvectors, eigenvalues, rows, cols);
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function. Transposes matrix in
// into matrix out.
//
// @param in input matrix
// @param out output transposed matrix in
// @param rows # of rows of in and out
// @param cols # of columns of in and out
////////////////////////////////////////////////////////////
void gpu_transpose(float *in, float *out, int rows, int cols)
{
    // square 2D launch sized by rows (matches the original configuration)
    const unsigned int nblocks = (rows + TWOD_THREADS - 1) / TWOD_THREADS;
    dim3 block(TWOD_THREADS, TWOD_THREADS, 1);
    dim3 grid(nblocks, nblocks, 1);
    Transpose<<< grid, block >>>(in, out, rows, cols);
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function.
// Get diagonal elements of matrix M.
//
// @param M input matrix
// @param diagonals output, diagonal elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
void gpu_get_diagonals(float *M, float *diagonals, int n)
{
    // 1D launch, ceil-divide n by the block size
    dim3 block(ONED_THREADS, 1, 1);
    dim3 grid((n + ONED_THREADS - 1) / ONED_THREADS, 1, 1);
    GetDiagonals<<< grid, block >>>(M, diagonals, n);
}
////////////////////////////////////////////////////////////
//
// Kernel wrapper function.
// Get superdiagonal elements of matrix M.
//
// @param M input matrix
// @param supdiags output, superdiag. elements
// @param n # of rows and columns of M
////////////////////////////////////////////////////////////
void gpu_get_superdiagonals(float *M, float *supdiags, int n)
{
    // 1D launch, ceil-divide n by the block size
    dim3 block(ONED_THREADS, 1, 1);
    dim3 grid((n + ONED_THREADS - 1) / ONED_THREADS, 1, 1);
    GetSuperDiagonals<<< grid, block >>>(M, supdiags, n);
}
////////////////////////////////////////////////////////////
//
// Prints current free memory on the GPU.
//
////////////////////////////////////////////////////////////
void print_mem()
{
    // cudaMemGetInfo takes size_t, so GPUs with more than 4GB do not
    // overflow the way the old cuMemGetInfo(uint*, uint*) call did.
    size_t free_bytes = 0, total_bytes = 0;
    cudaMemGetInfo(&free_bytes, &total_bytes);
    printf("\t\t Current Free GPU Memory: %i MB\n", (int)(free_bytes>>20));
}
////////////////////////////////////////////////////////////
//
// Entry point for GPU RMT algorithm.
//
// @param data loaded dataset (host, rows x cols)
// @param random loaded random matrix (host)
// @param pearson_out data Pearson matrix (output)
// @param rotations rotated component loadings (output)
// @param pear_in true if data is Pearson matrix
// @param curr_step current step in algorithm
// @param cols # of columns of data
// @param rows # of rows of data
// @return number of meaningful components, or EXIT_FAILURE
////////////////////////////////////////////////////////////
extern "C" int runRMT(float *data, float *random, float *pearson_out, float *rotations, bool pear_in, int curr_step, int cols, int rows)
{
    //set device
    cudaSetDevice(cutGetMaxGflopsDeviceId());
    cublasStatus stat;
    culaStatus status;
    status = culaInitialize();
    checkStatus2(status);
    //declare variables
    float *g_data;
    float *g_workm;
    float *g_workv;
    float *g_transform;
    float *g_transpose;
    float *cd_eigen = new float[rows];     // data eigenvalues
    float *cr_eigen = new float[rows];     // random-matrix eigenvalues
    float *c_diags = new float[rows];
    float *c_superdiags = new float[rows-1];
    int meaningful;
    const int data_mem_size = sizeof(float) * rows * cols;
    const int pearson_mem_size = sizeof(float) * rows * rows;
    const int rows_mem_size = sizeof(float) * rows;
    int return_value;
    //allocate device memory
    cudaMalloc((void**)&g_data, data_mem_size);
    cudaMemset(g_data, 0, data_mem_size);
    cudaMalloc((void**)&g_workm, pearson_mem_size);
    cudaMemset(g_workm, 0, pearson_mem_size);
    cudaMalloc((void**)&g_workv, rows_mem_size);
    //BUGFIX: previously this zeroed g_workm (already cleared above);
    //g_workv is the freshly allocated buffer that needs clearing.
    cudaMemset(g_workv, 0, rows_mem_size);
    cudaMalloc((void**)&g_transform, pearson_mem_size);
    cudaMemset(g_transform, 0, pearson_mem_size);
    cudaMalloc((void**)&g_transpose, pearson_mem_size);
    cudaMemset(g_transpose, 0, pearson_mem_size);
    if(rows >= LARGE_MATRIX) print_mem();
    if(!pear_in)
    {
        // copy host memory to device
        cudaMemcpy(g_data, random, data_mem_size, cudaMemcpyHostToDevice);
        printf("(On GPU) Step %i: Calculating Pearson correlation coefficients for random matrix...", curr_step);
        fflush(stdout);
        gpu_pearson(g_data, g_workm, rows, cols);
        printf("Done.\n");
        fflush(stdout);
        curr_step++;
    }
    else
    {
        cudaMemcpy(g_workm, random, pearson_mem_size, cudaMemcpyHostToDevice);
    }
    if(rows >= LARGE_MATRIX) print_mem();
    //printf("(On GPU)\t Reducing random matrix to tridiagonal form...");
    //fflush(stdout);
    stat = cublasInit();
    if(stat != CUBLAS_STATUS_SUCCESS) return EXIT_FAILURE;
    printf("(On CPU) Step %i: Computing eigenvalues for Random matrice...", curr_step);
    fflush(stdout);
    //BUGFIX: pearson_mem_size is a byte count; the old code allocated
    //sizeof(float)*rows*rows floats, i.e. 4x the needed memory.
    float *matrix = new float[rows * rows];
    /*
     * compute eigenvalues for the random matrix
     */
    gpu_transpose(g_workm, g_transpose, rows, rows);
    cudaMemcpy(matrix, g_transpose, pearson_mem_size, cudaMemcpyDeviceToHost);
    //BUGFIX: capture the status so the failure check below tests the
    //actual ssyev result instead of the stale culaInitialize status.
    status = culaSsyev('N', 'L', rows, matrix, rows, cr_eigen);
    return_value = checkStatus2(status);
    gpu_transpose(g_transpose, g_workm, rows, rows);
    printf("Done.\n");
    fflush(stdout);
    if(return_value == EXIT_FAILURE)
    {
        printf("\nOut of memory!!! Try running again...\n");
        fflush(stdout);
        cudaFree(g_workm);
        g_workm = NULL;
        cudaFree(g_transform);
        g_transform = NULL;
        cudaFree(g_transpose);   //BUGFIX: was leaked on this path
        g_transpose = NULL;
        cudaFree(g_workv);       //BUGFIX: was leaked on this path
        g_workv = NULL;
        cudaFree(g_data);
        g_data = NULL;
        delete[] matrix;         //BUGFIX: was leaked on this path
        delete[] cd_eigen;
        delete[] cr_eigen;
        delete[] c_diags;
        delete[] c_superdiags;
        cublasShutdown();
        cudaThreadExit();
        return EXIT_FAILURE;
    }
    if(!pear_in)
    {
        //BUGFIX: g_data only holds data_mem_size bytes; memsetting
        //pearson_mem_size could overrun it when rows > cols.
        cudaMemset(g_data, 0, data_mem_size);
        cudaMemcpy(g_data, data, data_mem_size, cudaMemcpyHostToDevice);
        printf("(On GPU)\t Calculating Pearson correlation coefficients for data matrix...", curr_step);
        fflush(stdout);
        gpu_pearson(g_data, g_workm, rows, cols);
        //free unneeded memory
        cudaFree(g_data);
        g_data = NULL;
        cudaMemcpy(pearson_out, g_workm, pearson_mem_size, cudaMemcpyDeviceToHost);
        printf("Done.\n");
        fflush(stdout);
    }
    else
    {
        //free unneeded memory
        cudaFree(g_data);
        g_data = NULL;
        memcpy(pearson_out, data, data_mem_size);
        cudaMemcpy(g_workm, data, data_mem_size, cudaMemcpyHostToDevice);
    }
    if(rows >= LARGE_MATRIX) print_mem();
    printf("Done.\n");
    fflush(stdout);
    /*
     * compute eigenvalues for the data matrix
     */
    printf("(On CPU) Step %i: Computing eigenvalues for data matrice...", curr_step);
    fflush(stdout);
    //BUGFIX: removed "float *pw = new float[rows];" -- it was never used
    //and never freed.
    gpu_transpose(g_workm, g_transpose, rows, rows);
    cudaMemcpy(matrix, g_transpose, pearson_mem_size, cudaMemcpyDeviceToHost);
    //BUGFIX: capture the status and refresh return_value so the failure
    //check below reflects this call.
    status = culaSsyev('N', 'L', rows, matrix, rows, cd_eigen);
    return_value = checkStatus2(status);
    gpu_transpose(g_transpose, g_workm, rows, rows);
    printf("Done.\n");
    fflush(stdout);
    if(return_value == EXIT_FAILURE)
    {
        printf("\nOut of memory!!! Try running again...\n");
        fflush(stdout);
        cudaFree(g_workm);
        g_workm = NULL;
        cudaFree(g_transform);
        g_transform = NULL;
        cudaFree(g_transpose);   //BUGFIX: was leaked on this path
        g_transpose = NULL;
        cudaFree(g_workv);       //BUGFIX: was leaked on this path
        g_workv = NULL;
        delete[] matrix;         //BUGFIX: was leaked on this path
        delete[] cd_eigen;
        delete[] cr_eigen;
        delete[] c_diags;
        delete[] c_superdiags;
        cublasShutdown();
        cudaThreadExit();
        return EXIT_FAILURE;
    }
    printf("\t\t Data Eigenvalues\tRandom Eigenvalues (Last 5)\n");
    for(int i = rows-4; i < rows; i++) printf("\t\t %f\t\t%f\n", cd_eigen[i], cr_eigen[i]);
    if(pear_in||cd_eigen[rows-2]<cr_eigen[rows-1])
    {
        // fall back to the K largest components
        if(rows < K)
            meaningful = rows;
        else
            meaningful = K;
        printf("(On CPU)\t Computing tridiagonal eigenvectors for the K largest eigenvalues (K = %i)...", meaningful);
        fflush(stdout);
        culaSstein2(cd_eigen, rotations, matrix, meaningful,rows);
    }else{
        // keep only components above the random-matrix noise floor
        printf("(On CPU)\t Computing tridiagonal eigenvectors for the eigenvalues larger than the largest element in random %f ", cr_eigen[4]);
        fflush(stdout);
        meaningful = culaSstein(cd_eigen, rotations, matrix, rows, cr_eigen[rows-1]);
    }
    checkStatus2(status);
    cudaMemcpy(g_transpose, rotations, pearson_mem_size, cudaMemcpyHostToDevice);
    printf("Done.\n");
    fflush(stdout);
    printf("(On GPU)\t Backtransforming tridiagonal eigenvectors to those of the original matrix...");
    fflush(stdout);
    return_value = cublasSsormtr(g_transpose, g_transform, meaningful, rows);
    gpu_transpose(g_transpose, g_workm, rows, rows);
    printf("Done.\n");
    fflush(stdout);
    //transform eigenvectors to loadings
    printf("(On GPU) Step %i: Transforming eigenvectors to component loadings...", curr_step);
    fflush(stdout);
    cudaFree(g_transform);
    g_transform = NULL;
    cudaMemcpy(g_workv, cd_eigen, meaningful*sizeof(float), cudaMemcpyHostToDevice);
    gpu_transform_eigenvectors(g_workm, g_workv, rows, rows);
    printf("Done.\n");
    fflush(stdout);
    curr_step++;
    cudaFree(g_workv);
    g_workv = NULL;
    if(rows >= LARGE_MATRIX) print_mem();
    //rotate loadings using varimax
    printf("(On GPU) Step %i: Orthogonal rotation using Varimax...", curr_step);
    fflush(stdout);
    gpu_transpose(g_workm, g_transpose, rows, rows);
    gpu_varimax(g_transpose, rows, meaningful);
    gpu_transpose(g_transpose, g_workm, rows, rows);
    printf("Done.\n\n");
    fflush(stdout);
    //copy results from device to host
    cudaMemcpy(rotations, g_workm, pearson_mem_size, cudaMemcpyDeviceToHost);
    // cleanup memory
    cudaFree(g_workm);
    g_workm = NULL;
    cudaFree(g_transform);
    g_transform = NULL;
    cudaFree(g_transpose);   //BUGFIX: was leaked
    g_transpose = NULL;
    delete[] matrix;         //BUGFIX: was leaked
    delete[] cd_eigen;
    delete[] cr_eigen;
    delete[] c_diags;
    delete[] c_superdiags;
    cublasShutdown();
    checkStatus2(status);
    culaShutdown();
    cudaThreadExit();
    return meaningful;
}
//end rmta_gpu.cu
|
d8142accd211efbc68728e8c1376e28c7a4a6ac5.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA implementation of the N4 algorithm. */
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <string.h> // memset
#include <complex>
// Without this the complex number multiplication/division doesn't work.
#include <fftw3.h>
#include "helper_functions.h"
#include "helper_cuda.h"
#include "reducer.h"
#include "cudaN4.h"
// Writes background_value into every element of data whose mask entry is zero.
__global__ void set_background_kernel(
    // Input
    const float * mask,
    const float background_value,
    const unsigned int n,
    // Output
    float * data){
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n && mask[idx] == 0.f){
        data[idx] = background_value;
    }
}
// Element-wise in-place accumulation: a[i] += b[i].
__global__ void sum_inplace_kernel(
    // Input
    float * a, // output
    const float * b,
    const unsigned int n){
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n){
        a[idx] += b[idx];
    }
}
// Element-wise difference: out[i] = a[i] - b[i].
__global__ void subtract(
    // Input
    const float * a,
    const float * b,
    const unsigned int n,
    // Output
    float * out){
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n){
        out[idx] = a[idx] - b[idx];
    }
}
// Element-wise exponential: biasField[i] = exp(logBiasField[i]).
__global__ void exp_kernel(
    // Input
    const float * logBiasField,
    const unsigned int n,
    // output
    float * biasField){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i>=n)
        return;
    // expf: explicit single-precision exponential
    biasField[i] = expf(logBiasField[i]);
}
// Bias-field correction: im_normalized[i] = im[i] / exp(logBiasField[i]).
__global__ void exp_and_divide_kernel(
    // Input
    const float * logBiasField,
    const float * im,
    const unsigned int n,
    // output
    float * im_normalized){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i>=n)
        return;
    // expf: explicit single-precision exponential
    im_normalized[i] = im[i]/expf(logBiasField[i]);
}
// Element-wise out[i] = exp(a[i] - b[i]). Used when checking bias field convergence.
__global__ void subtract_and_exp_kernel(
    // Input
    const float * a,
    const float * b,
    // Output
    const unsigned int n,
    float * out){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i>=n)
        return;
    // expf: explicit single-precision exponential
    out[i]= expf(a[i] - b[i]);
}
// In-place squared deviation: a[i] = (a[i] - mean)^2.
__global__ void subtract_mean_and_sqr_kernel(
    float * a,
    const float mean,
    const unsigned int n){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n){
        const float dev = a[idx] - mean;
        a[idx] = dev * dev;
    }
}
// Safe element-wise division: c[i] = a[i]/b[i], or 0 where b[i] is zero.
__global__ void divide_kernel(
    // Input
    const float * a,
    const float * b,
    const unsigned n,
    // Output
    float * c){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    const float denom = b[idx];
    c[idx] = (denom != 0.f) ? (a[idx] / denom) : 0.f;
}
// Masked logarithm: im_log[i] = log(im[i]) inside the mask, 0 outside.
__global__ void log_kernel(
    // Input
    const float * im,
    const float * mask,
    const unsigned int numberOfPixels,
    // Output
    float * im_log){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i>=numberOfPixels)
        return;
    if (mask[i]!=0){
        // logf: explicit single-precision logarithm
        im_log[i] = logf(im[i]);
    }else{
        im_log[i] = 0;
    }
}
// histogramGPU computes the histogram of an input array on the GPU
// This function is taken from
// https://github.com/kevinzakka/learn-cuda/blob/master/src/histogram.cu
// Modified to support fractional bin as the N4 implementation did.
//
// Each masked pixel contributes a total weight of 1, split linearly between
// the two bins straddling its fractional bin coordinate
// (input[i] - in_min) / bin_slope. Each block accumulates into a private
// shared-memory histogram, then folds it into the global one atomically.
// Launch requirement: numBin * sizeof(float) bytes of dynamic shared memory.
__global__ void histogramGPU_kernel(
    // Input
    const float * input,   // pixel intensities
    const float * mask,    // nonzero => pixel participates
    const float in_min,    // intensity mapped to bin 0
    const float bin_slope, // intensity width of one bin
    const unsigned int numElems,
    const unsigned int numBin,
    // Output
    float * bins){         // global histogram (accumulated, not overwritten)
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int BLOCK_SIZE = blockDim.x;
    // compute global thread coordinates
    int i = (bx * BLOCK_SIZE) + tx;
    // create a private histogram copy for each thread block
    // size same as numBin
    extern __shared__ float hist[];
    // each thread must initialize more than 1 location
    if (numBin > BLOCK_SIZE) {
        for (int j=tx; j<numBin; j+=BLOCK_SIZE) {
            hist[j] = 0.f;
        }
    }
    // use the first `PRIVATE` threads of each block to init
    else {
        if (tx < numBin) {
            hist[tx] = 0.f;
        }
    }
    // wait for all threads in the block to finish
    __syncthreads();
    // update private histogram given the mask value
    // this is safe due to short-circuit evaluation
    if ((i < numElems) && (mask[i]!=0.)) {
        // bin the input
        float cidx =(input[i] - in_min)/bin_slope;
        int idx = (int)floor(cidx);
        float offset = cidx - (float)idx;
        // NOTE(review): assumes masked intensities lie within
        // [in_min, in_min + (numBin-1)*bin_slope] so 0 <= idx <= numBin-1;
        // out-of-range values would index hist[] out of bounds -- confirm
        // against the caller that computes in_min/bin_slope.
        if( offset == 0. ){
            // exactly on a bin boundary: full weight to one bin
            atomicAdd(&(hist[idx]), 1.);
        }
        else if( idx < numBin - 1 ){
            // split the unit weight between the two neighbouring bins
            atomicAdd(&(hist[idx]), 1. - offset);
            atomicAdd(&(hist[idx+1]), offset);
        }
    }
    // wait for all threads in the block to finish
    __syncthreads();
    // each thread must update more than 1 location
    if (numBin > BLOCK_SIZE) {
        for (int j=tx; j<numBin; j+=BLOCK_SIZE) {
            atomicAdd(&(bins[j]), hist[j]);
        }
    }
    // use the first `PRIVATE` threads to update final histogram
    else {
        if (tx < numBin) {
            atomicAdd(&(bins[tx]), hist[tx]);
        }
    }
}
// Maps each masked pixel through the sharpened-histogram lookup table E:
// the pixel's fractional bin coordinate selects (and linearly interpolates
// between) the adjusted expectation values, clamping at the last bin.
__global__ void histogramMapping_kernel(
    // Input
    const float * before_sharpen,
    const float * mask,
    const float * E,               // per-bin mapped values, length numBin
    const float binMinimum,
    const float histogramSlope,
    const unsigned int n, // num pixel in image
    const unsigned int numBin,
    // Output
    float * after_sharpen // Image after sharpening
    ){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i>=n)
        return;
    if (mask[i]==0.)
        return;
    // fractional bin coordinate of this pixel's intensity
    float cidx = ( before_sharpen[i] - binMinimum ) / histogramSlope;
    // fmaxf keeps this in single precision (max(0., cidx) promoted to double)
    cidx = fmaxf(0.f, cidx); // In case numerical error send cidx<0...
    unsigned int idx = floorf( cidx );
    float correctedPixel;
    if( idx < numBin - 1 ){
        // linear interpolation between adjacent mapped bin values
        correctedPixel = E[idx] + ( E[idx + 1] - E[idx] ) * ( cidx - idx );
    } else {
        correctedPixel = E[numBin - 1];
    }
    after_sharpen[i] = correctedPixel;
}
// This kernel has to be used with the upsampleLattice3D_gpu() with a fixed number of threads to produce the correct result.
// Doubles the resolution of a 3D B-spline control-point lattice. Each block
// handles one input control point (plus a one-point halo), caches its 3x3x3
// neighbourhood in shared memory, and the 2x2x2 threads of the block compute
// the even/odd refined points from the fixed subdivision weights bw.
// Expected launch: 2x2x2 thread blocks -- TODO confirm against the wrapper.
__global__ void upsample_lattice_kernel_3D(
    // Input
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    const unsigned int nx,     // number of input spans per axis
    const unsigned int ny,
    const unsigned int nz,
    const unsigned int n2x,    // output (upsampled) lattice dimensions
    const unsigned int n2y,
    const unsigned int n2z,
    const float * lattice,
    // Output
    float * lattice_upsample
    ){
    // The index notation here is trying to match as closely as possible with the one on the numpy version.
    // blockIdx-1: -1:n+1, -1:m+1, -1:l+1
    // i: -1:n+1
    int i = (int)blockIdx.x-1;
    int j = (int)blockIdx.y-1;
    int k = (int)blockIdx.z-1;
    // blockDim.x
    // even/odd offset of this thread's refined point
    int xx = threadIdx.x;
    int yy = threadIdx.y;
    int zz = threadIdx.z;
    // flattened thread index, used for the cooperative shared load below
    int tx = threadIdx.x * blockDim.y * blockDim.z + threadIdx.y * blockDim.z + threadIdx.z;
    // B-spline subdivision masks: row 0 -> even output points, row 1 -> odd
    float bw[2][3] = {
        {1./8, 6./8, 1./8},
        {0, 1./2, 1./2}
    };
    // 3x3x3 input neighbourhood around control point (i,j,k)
    __shared__ float lattice_piece[27];
    int BLOCK_SIZE = blockDim.x * blockDim.y * blockDim.z;
    // Each threads load lattice pieces into the shared lattice_piece
    for (int h=tx; h<27; h+=BLOCK_SIZE) {
        int Z = h % 3 + k -1;
        int Y = (h/3)%3 + j -1;
        int X = h / 9 + i -1; // i-1:i+1
        // clip the input lattice point to a constant boundary
        // NOTE(review): clamps against nx+1/ny+1/nz+1 rather than ncpt_* --
        // this assumes the lattice carries a one-point pad on each side
        // (ncpt = n + 3); confirm with the host-side wrapper.
        X = min(max(X, -1), (int)(nx+1))+1;
        Y = min(max(Y, -1), (int)(ny+1))+1;
        Z = min(max(Z, -1), (int)(nz+1))+1;
        lattice_piece[h] = lattice[ X * ncpt_y * ncpt_z + Y * ncpt_z + Z ];
    }
    __syncthreads();
    // Now, sum the 27 elements weighted by the weight.
    int idx = 2*i + xx;
    int idy = 2*j + yy;
    int idz = 2*k + zz;
    if( (idx>=-1) && (idx<=(int)(2*nx+1)) &&
        (idy>=-1) && (idy<=(int)(2*ny+1)) &&
        (idz>=-1) && (idz<=(int)(2*nz+1))){
        float ls = 0.;
        #pragma unroll
        for (int bx=0; bx<3; bx++){
            #pragma unroll
            for (int by=0; by<3; by++){
                #pragma unroll
                for (int bz=0; bz<3; bz++){
                    ls+=bw[xx][bx]*bw[yy][by]*bw[zz][bz] * lattice_piece[bx * 9 + by * 3 + bz];
                }
            }
        }
        // shift into the padded output index space before storing
        idx+=1;
        idy+=1;
        idz+=1;
        lattice_upsample[idx * n2y*n2z + idy * n2z + idz] = ls;
    }
}
// Evaluates the four uniform cubic B-spline basis weights at parameter
// t in [0, 1), writing them into B[0..3] (they sum to 1).
__device__ __inline__ void cubicBspline(
    // Input
    const float t,
    // Output
    float * B){
    const float t2 = t*t;
    const float t3 = t*t*t;
    const float omt = 1-t;   // one minus t
    B[0] = omt*omt*omt/6;
    B[1] = (3*t3 - 6*t2 + 4)/6;
    B[2] = (-3*t3 + 3*t2 + 3*t + 1)/6;
    B[3] = t3/6;
}
// This evaluates the Bspline on each pixel on a field. This uses shared memory to store
// lattice value shared among nearby pixels.
//
// Grid layout: blocks_per_span_* blocks per B-spline span in each dimension.
// Each block caches the 4x4x4 control points of its span in shared memory,
// then every thread evaluates the tensor-product cubic B-spline at one
// pixel of the output field.
__global__ void evaluate_bspline_kernel_3D(
    // Input
    const float * lattice,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    const unsigned int blocks_per_span_x, // How many block is there per span
    const unsigned int blocks_per_span_y,
    const unsigned int blocks_per_span_z,
    const float span_x, // How long each span is
    const float span_y,
    const float span_z,
    const unsigned int fx, // What is the output field size
    const unsigned int fy,
    const unsigned int fz,
    // Output
    float * fitted
    ){
    // 4x4x4 control points covering this span
    __shared__ float lattice_piece[64];
    // Start Spans index i,j,k. This is the index into the lattice's left top corner.
    int si = blockIdx.x / (int)blocks_per_span_x;
    int sj = blockIdx.y / (int)blocks_per_span_y;
    int sk = blockIdx.z / (int)blocks_per_span_z;
    // Which block it is within this span
    int bnx = blockIdx.x % (int)blocks_per_span_x;
    int bny = blockIdx.y % (int)blocks_per_span_y;
    int bnz = blockIdx.z % (int)blocks_per_span_z;
    // flattened thread index, used for the cooperative shared load
    int tx = threadIdx.x * blockDim.y * blockDim.z + threadIdx.y * blockDim.z + threadIdx.z;
    int BLOCK_SIZE = blockDim.x * blockDim.y * blockDim.z;
    // Each thread loads lattice values into the network
    for (int h=tx; h<64; h+=BLOCK_SIZE) {
        // Need to translate the h into span index.
        // So threadidx -> linearize -> for loop -> delinearize -> lattice index
        int skk = sk + h % 4;
        int sjj = sj + (h/4) % 4;
        int sii = si + h/16;
        lattice_piece[h] = lattice[ sii * ncpt_y * ncpt_z + sjj * ncpt_z + skk ];
    }
    __syncthreads();
    // (BUGFIX: removed a leftover debug block here -- an empty triple loop
    // guarded by a specific block/thread index that had no effect.)
    // Determine if the current thread correspond to an actual pixel.
    // Example of starting pixel vs the Bspline points,
    // If this is 72.3, then the right closest center point is 72.5 then pixel index is 72.
    // If this is 72.6, then the right closest center point is 73.5 then pixel index is 73.
    // So it ends up being rounding around 0.5.
    // The start pixel that this span included
    int start_pixel_x = round(span_x * si);
    int start_pixel_y = round(span_y * sj);
    int start_pixel_z = round(span_z * sk);
    // The end pixel that this span included
    int end_pixel_x = round(span_x * (si+1))-1;
    int end_pixel_y = round(span_y * (sj+1))-1;
    int end_pixel_z = round(span_z * (sk+1))-1;
    // Pixel index
    int i = start_pixel_x + bnx* blockDim.x + threadIdx.x;
    int j = start_pixel_y + bny* blockDim.y + threadIdx.y;
    int k = start_pixel_z + bnz* blockDim.z + threadIdx.z;
    // If this thread actually correspond to an actual pixel, calculate the fitting value.
    if ((i <= end_pixel_x) && (j <= end_pixel_y) && (k <= end_pixel_z) &&
        (i < fx) && (j < fy) && (k < fz)){
        // The normalized local coordinates t for calculating bspline coefficients.
        // (Renamed from tx/ty/tz: the old float tx shadowed the int thread
        // index tx declared above.)
        int bx = si;
        int by = sj;
        int bz = sk;
        float tu = ((i+0.5) - bx * span_x)/span_x;
        float tv = ((j+0.5) - by * span_y)/span_y;
        float tw = ((k+0.5) - bz * span_z)/span_z;
        // calculate B spline weight
        float wx[4];
        float wy[4];
        float wz[4];
        cubicBspline(tu, wx);
        cubicBspline(tv, wy);
        cubicBspline(tw, wz);
        // Accumulate the values
        float value = 0.;
        #pragma unroll
        for (int ix=0; ix<4; ix++){
            #pragma unroll
            for (int iy=0; iy<4; iy++){
                #pragma unroll
                for (int iz=0; iz<4; iz++){
                    value+=wx[ix]* wy[iy]* wz[iz] * lattice_piece[ix * 16 + iy * 4 + iz];
                }
            }
        }
        fitted[i*fy*fz + j * fz + k] = value;
    }
}
// This function accumulate WC2_phi and WC2 for fitting.
// Scatter step of the B-spline least-squares fit: for every masked pixel,
// compute the 64 tensor-product B-spline weights w_c of the surrounding
// 4x4x4 control points, derive phi_c = field * w_c / sum(w_c^2), and
// accumulate w_c^2 * phi_c into wc2_phic and w_c^2 into wc2 per control
// point -- first into block-shared partials, then atomically into the
// global arrays. Grid layout mirrors evaluate_bspline_kernel_3D.
__global__ void accumulate_WC2phic_and_WC2(
    // Input
    const float * field,       // values being fitted
    const float * mask,        // nonzero => pixel participates in the fit
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    const unsigned int blocks_per_span_x, // How many block is there per span
    const unsigned int blocks_per_span_y,
    const unsigned int blocks_per_span_z,
    const float span_x, // How long each span is
    const float span_y,
    const float span_z,
    const unsigned int fx, // What is the input field size
    const unsigned int fy,
    const unsigned int fz,
    // Output
    float * wc2_phic,          // accumulated numerator (must be pre-zeroed)
    float * wc2                // accumulated denominator (must be pre-zeroed)
    ){
    // Initialize the local wc2_phic and wc2 to 0
    __shared__ float wc2phic_local[64];
    __shared__ float wc2_local[64];
    // Start Spans index i,j,k. This is the index into the lattice's left top corner.
    int si = blockIdx.x / blocks_per_span_x;
    int sj = blockIdx.y / blocks_per_span_y;
    int sk = blockIdx.z / blocks_per_span_z;
    // Which block it is within this span
    int bnx = blockIdx.x % blocks_per_span_x;
    int bny = blockIdx.y % blocks_per_span_y;
    int bnz = blockIdx.z % blocks_per_span_z;
    // flattened thread index, used for the init and final flush loops
    int tx = threadIdx.x * blockDim.y * blockDim.z + threadIdx.y * blockDim.z + threadIdx.z;
    int BLOCK_SIZE = blockDim.x * blockDim.y * blockDim.z;
    // Each thread initializes the wc2phi and wc2
    for (int h=tx; h<64; h+=BLOCK_SIZE) {
        wc2phic_local[h] = 0.;
        wc2_local[h] = 0.;
    }
    __syncthreads();
    // Determine if the current thread correspond to an actual pixel.
    // Example of starting pixel vs the Bspline points,
    // If this is 72.3, then the right closest center point is 72.5 then pixel index is 72.
    // If this is 72.6, then the right closest center point is 73.5 then pixel index is 73.
    // So it ends up being rounding around 0.5.
    // The start pixel that this span included
    int start_pixel_x = round(span_x * si);
    int start_pixel_y = round(span_y * sj);
    int start_pixel_z = round(span_z * sk);
    // The end pixel that this span included
    int end_pixel_x = round(span_x * (si+1))-1;
    int end_pixel_y = round(span_y * (sj+1))-1;
    int end_pixel_z = round(span_z * (sk+1))-1;
    // Pixel index
    int i = start_pixel_x + bnx* blockDim.x + threadIdx.x;
    int j = start_pixel_y + bny* blockDim.y + threadIdx.y;
    int k = start_pixel_z + bnz* blockDim.z + threadIdx.z;
    // If this thread actually correspond to an actual pixel and the mask value is not zero, calculate the fitting value.
    // Note that the (mask[fidx]!=0) has to be put to the end to ensure the input fidx is a valid index due to short
    // circuit logic.
    int fidx= i*fy*fz + j*fz + k;
    if ((i <= end_pixel_x) && (j <= end_pixel_y) && (k <= end_pixel_z) &&
        (i < fx) && (j < fy) && (k < fz) && (mask[fidx]!=0) ){
        // The normalized local coordinates t for calculating bspline coefficients
        int bx = si;
        int by = sj;
        int bz = sk;
        // NOTE(review): this float tx shadows the int thread index tx above;
        // the outer tx is still used after the if-block, which works only
        // because the shadow is scoped to this block.
        float tx = ((i+0.5) - bx * span_x)/span_x;
        float ty = ((j+0.5) - by * span_y)/span_y;
        float tz = ((k+0.5) - bz * span_z)/span_z;
        // calculate B spline weight
        float wx[4];
        float wy[4];
        float wz[4];
        cubicBspline(tx, wx);
        cubicBspline(ty, wy);
        cubicBspline(tz, wz);
        // Calculate phi
        float wc_sum = 0.;
        float wc;
        float phi_c;
        // calculate wc_sum (sum of squared weights, used to normalize phi_c)
        #pragma unroll
        for (int ix=0; ix<4; ix++){
            #pragma unroll
            for (int iy=0; iy<4; iy++){
                #pragma unroll
                for (int iz=0; iz<4; iz++){
                    wc = wx[ix]* wy[iy]* wz[iz];
                    wc_sum += wc*wc;
                }
            }
        }
        // calculate wc2_phi and wc2
        float fv = field[fidx]; // field value
        #pragma unroll
        for (int ix=0; ix<4; ix++){
            #pragma unroll
            for (int iy=0; iy<4; iy++){
                #pragma unroll
                for (int iz=0; iz<4; iz++){
                    wc = wx[ix]* wy[iy]* wz[iz];
                    phi_c = fv * wc / wc_sum;
                    // Locally accumulate wc2phic and wc2. This eliminates the need for
                    // each thread directly accumulate into global memory.
                    atomicAdd(&wc2phic_local[ix*16 + iy*4 + iz], wc*wc*phi_c);
                    atomicAdd(&wc2_local[ix*16 + iy*4 + iz], wc*wc);
                }
            }
        }
    }
    __syncthreads();
    // Each thread accumulates the local wc2phi and wc2 into the global memory.
    for (int h=tx; h<64; h+=BLOCK_SIZE) {
        // Need to translate the h into span index.
        // So threadidx -> linearize -> for loop -> delinearize -> lattice index
        int skk = sk + h % 4;
        int sjj = sj + (h/4) % 4;
        int sii = si + h/16;
        atomicAdd(&wc2_phic[sii * ncpt_y * ncpt_z + sjj * ncpt_z + skk], wc2phic_local[h]);
        atomicAdd(&wc2[sii * ncpt_y * ncpt_z + sjj * ncpt_z + skk], wc2_local[h]);
    }
}
// Zeroes out both the image and the mask wherever the image intensity is at
// or below low_value. One thread per pixel; excess threads simply fall
// through the bounds/threshold guard.
__global__ void lowthreshold(
    const float low_value,
    const unsigned int numberOfPixel,
    float * im,
    float * mask){
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Combined guard: out-of-range threads and above-threshold pixels are no-ops.
    if (idx < numberOfPixel && im[idx] <= low_value){
        im[idx] = 0;
        mask[idx] = 0;
    }
}
// Histogram-based sharpening of the (log-)image, following the scheme of
// ITK's N4BiasFieldCorrectionImageFilter: build a masked intensity histogram
// on the GPU, Wiener-deconvolve it with a Gaussian on the CPU (FFTW), derive
// an expected-value lookup table E, and map every pixel through E.
// NOTE(review): the FFTW plans were created against the specific h_* arrays
// passed in, so those buffers must be exactly the ones the plans were built
// with — do not swap them for other storage.
// NOTE(review): d_buffer/d_histogram/d_E are hipMalloc'd and freed on every
// call; since N4 calls this inside its iteration loop, hoisting these
// allocations to the caller would save per-iteration allocator traffic.
void sharpenImage(
    // Input
    float * d_before_sharpen, // image before sharpening. Not const because the min/max reduction temporarily writes sentinel background values into it.
    const float * d_mask, // the input mask (0 = background)
    const unsigned int numberOfPixels, // number of pixels within the image
    const unsigned int NumberOfHistogramBins, // number of histogram bins
    const unsigned int paddedHistogramSize, // histogram size after padding (for the FFT)
    const unsigned int histogramOffset, // index offset of the real histogram inside the padded array
    const float WienerFilterNoise, // small constant in the Wiener-filter denominator to prevent divide by 0
    const float BiasFieldFullWidthAtHalfMaximum, // gaussian filter width (FWHM, in intensity units)
    float * h_V, // real-valued work arrays (padded histogram, filter, etc.)
    float * h_F,
    float * h_U,
    float * h_numerator,
    float * h_denominator,
    float * h_E,
    std::vector<std::complex<float>> & h_Vf, // complex (frequency-domain) counterparts
    std::vector<std::complex<float>> & h_Ff,
    std::vector<std::complex<float>> & h_Uf,
    std::vector<std::complex<float>> & h_numeratorf,
    std::vector<std::complex<float>> & h_denominatorf,
    fftwf_plan & pf_v, // FFTW plans: pf_* forward, pb_* backward; each is bound to a fixed (in, out) array pair above
    fftwf_plan & pf_f,
    fftwf_plan & pf_numerator,
    fftwf_plan & pf_denominator,
    fftwf_plan & pb_u,
    fftwf_plan & pb_numerator,
    fftwf_plan & pb_denominator,
    // Output
    float * d_after_sharpen // Image after sharpening
    ){
    // Define variables
    float binMaximum, binMinimum;
    dim3 threads_1d(1024, 1, 1);
    dim3 blocks_1d( (numberOfPixels+1023)/1024, 1, 1);
    // buffer needed for the min/max operation
    float * d_buffer;
    float * d_histogram;
    float * d_E;
    checkCudaErrors(hipMalloc((void **)&d_buffer, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_histogram, NumberOfHistogramBins*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_E, NumberOfHistogramBins*sizeof(float)));
    checkCudaErrors(hipMemset(d_histogram, 0, NumberOfHistogramBins*sizeof(float))); // Init histogram to 0
    // Within the mask, find min/max of the image. The background is first set
    // to a huge (resp. tiny) sentinel so it never wins the reduction, then
    // reset to 0 afterwards.
    hipLaunchKernelGGL(( set_background_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0, d_mask, 10000000000000.0, numberOfPixels, d_before_sharpen);
    binMinimum = Reducer::reduce_min_wrapper(numberOfPixels, d_before_sharpen, d_buffer);
    hipLaunchKernelGGL(( set_background_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0, d_mask, -10000000000000.0, numberOfPixels, d_before_sharpen);
    binMaximum = Reducer::reduce_max_wrapper(numberOfPixels, d_before_sharpen, d_buffer);
    hipLaunchKernelGGL(( set_background_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0, d_mask, 0.0, numberOfPixels, d_before_sharpen);
    // Calculate how large each bin is
    float histogramSlope = ( binMaximum - binMinimum )/( (float)NumberOfHistogramBins - 1. );
    // Create the intensity profile (within the masked region, if applicable)
    // using a triangular parzen windowing scheme (i.e. a histogram with
    // fractional counts split between the two nearest bins).
    hipLaunchKernelGGL(( histogramGPU_kernel), dim3(blocks_1d), dim3(threads_1d), NumberOfHistogramBins*sizeof(float) , 0,
        // Input
        d_before_sharpen,
        d_mask,
        binMinimum,
        histogramSlope,
        numberOfPixels,
        NumberOfHistogramBins,
        // Output
        d_histogram
        );
    // Copy histogram into the padded host array at the offset expected by the plans.
    checkCudaErrors(hipMemcpy(&(h_V[histogramOffset]), d_histogram, NumberOfHistogramBins*sizeof(float), hipMemcpyDeviceToHost));
    // confirmed against the python example that the histogram is correct at this point.
    // Calculate the fft on the histogram, h_V -> h_Vf
    fftwf_execute(pf_v);
    // Build a Gaussian filter of the padded-histogram size and FFT it.
    // Since the histogram size is small (~512 bins at most), the CPU FFTW
    // implementation is used instead of cuFFT.
    float scaledFWHM = BiasFieldFullWidthAtHalfMaximum / histogramSlope;
    float expFactor = 4.0 * log( 2.0 ) / (scaledFWHM * scaledFWHM);
    float scaleFactor = 2.0 * sqrt( log( 2.0 )/ M_PI ) / scaledFWHM;
    // These parameters matches the python implementation
    // printf("GPU: Histogram slope/scaledFWHM/expFactor/scaleFactor: (%f, %f, %f, %f)\n", histogramSlope, scaledFWHM, expFactor, scaleFactor);
    // The Gaussian is laid out wrap-around style: index 0 is the peak and
    // the tails meet in the middle of the padded array.
    h_F[0] = scaleFactor;
    unsigned int halfSize = (unsigned int)(0.5 * paddedHistogramSize);
    for( unsigned int i = 1; i <= halfSize; i++ ){
        h_F[i] = h_F[paddedHistogramSize - i] =
            scaleFactor * exp( -expFactor*i*i );
    }
    // For even sizes the midpoint is its own mirror; overwrite it with the
    // exact value (-0.25*P^2*expFactor == -(P/2)^2 * expFactor).
    if( paddedHistogramSize % 2 == 0 ){
        h_F[halfSize] = scaleFactor * exp(
            -0.25 * paddedHistogramSize*paddedHistogramSize*expFactor );
    }
    // FFT the gaussian kernel, h_F -> h_Ff
    fftwf_execute(pf_f);
    // Build the Wiener deconvolution kernel Gf from Ff and apply it to Vf,
    // writing the deconvolved spectrum into Uf (real r2c half-spectrum only).
    for( unsigned int i = 0; i < (paddedHistogramSize/2+1); i++ ){
        // Make the Wiener deconvolution kernel and multiply with the signal.
        std::complex<float> c = conj(h_Ff[i]);
        std::complex<float> Gf = c / ( c * h_Ff[i] + WienerFilterNoise );
        // Divide by paddedHistogramSize here because FFTW's c2r transform is
        // unnormalized.
        h_Uf[i] = h_Vf[i] * Gf.real() / (float)paddedHistogramSize ;
    }
    // iFFT the deconvolved histogram and clip negative real values to 0,
    // h_Uf -> h_U. (The normalization was already folded in above.)
    fftwf_execute(pb_u);
    for( unsigned int i = 0; i < paddedHistogramSize; i++ ){
        h_U[i] = max( h_U[i], 0.0 );
    }
    // The numerator is histBin * U, where U = deconv(V); the bin center is
    // reconstructed from the padded index by undoing the histogram offset.
    for( unsigned int i = 0; i < paddedHistogramSize; i++ ){
        h_numerator[i] = ( (float)binMinimum + ((float)i - histogramOffset) * histogramSlope ) * h_U[i];
    }
    // Smooth the numerator with the gaussian kernel h_Ff in the frequency
    // domain: smooth(histBin * h_U).
    fftwf_execute(pf_numerator);
    for( unsigned int i = 0; i < (paddedHistogramSize/2+1); i++){
        h_numeratorf[i] *= h_Ff[i];
    }
    fftwf_execute(pb_numerator);
    // h_U -> h_denominatorf. This uses h_U directly as plan input.
    // smooth(h_U): again smooth the denominator with the gaussian kernel h_Ff.
    fftwf_execute(pf_denominator);
    for( unsigned int i = 0; i < (paddedHistogramSize/2+1); i++ ){
        h_denominatorf[i]*= h_Ff[i];
    }
    fftwf_execute(pb_denominator); // h_denominatorf -> h_denominator
    // The divide: E = smooth(histBin * h_U)/smooth(h_U), i.e. the expected
    // "true" intensity for each observed bin. Skips the histogramOffset
    // padding; denominator zeros map to E = 0.
    for( unsigned int i = 0; i < NumberOfHistogramBins; i++ ){
        if( h_denominator[i+histogramOffset] != 0.0 ){
            h_E[i] = h_numerator[i+histogramOffset] / h_denominator[i+histogramOffset];
        } else {
            h_E[i] = 0.0;
        }
    }
    // Map every pixel through the lookup table E on the GPU.
    checkCudaErrors(hipMemcpy(d_E, h_E, NumberOfHistogramBins*sizeof(float), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( histogramMapping_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
        // Input
        d_before_sharpen,
        d_mask,
        d_E,
        binMinimum,
        histogramSlope,
        numberOfPixels, // num pixel in image
        NumberOfHistogramBins,
        // Output
        d_after_sharpen // Image after sharpening
        );
    // Clean up
    checkCudaErrors(hipFree(d_buffer));
    checkCudaErrors(hipFree(d_histogram));
    checkCudaErrors(hipFree(d_E));
}
// Test harness for the log transform + sharpenImage pipeline: uploads a host
// image and mask, applies the low-intensity threshold, takes the masked log,
// runs sharpenImage, and copies both the log image and the sharpened result
// back to the host for inspection.
// NOTE(review): binMin_out and binMax_out are declared as outputs but are
// never assigned anywhere in this function — callers reading them get
// whatever garbage they held before the call. Fixing this would require
// sharpenImage to export the reduced min/max; confirm intent with the author.
void testsharpenImage(
    // Input
    const float * h_before_sharpen, // host image before sharpening
    const float * h_mask, // host mask (0 = background)
    const unsigned int numberOfPixels,
    N4Param & param,
    N4Data & data,
    // Output
    float * h_before_sharpen_log, // masked log of the thresholded image
    float * h_after_sharpen, // sharpened result
    float & binMin_out, // NOTE(review): never written — see header comment
    float & binMax_out){ // NOTE(review): never written — see header comment
    float * d_before_sharpen;
    float * d_before_sharpen_log;
    float * d_mask;
    float * d_after_sharpen;
    checkCudaErrors(hipMalloc((void **)&d_before_sharpen, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_mask, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_before_sharpen_log, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_after_sharpen, numberOfPixels*sizeof(float)));
    // Copy the image and mask to gpu
    checkCudaErrors(hipMemcpy(d_before_sharpen, h_before_sharpen, numberOfPixels*sizeof(float), hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_mask, h_mask, numberOfPixels*sizeof(float), hipMemcpyHostToDevice));
    dim3 threads_1d(512, 1, 1);
    dim3 blocks_1d((numberOfPixels+511)/512, 1, 1);
    // Set the mask = 0 and im = 0 wherever im <= param.low_value
    hipLaunchKernelGGL(( lowthreshold), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
        param.low_value,
        numberOfPixels,
        d_before_sharpen,
        d_mask);
    // Masked log transform (background stays 0)
    hipLaunchKernelGGL(( log_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
        // Input
        d_before_sharpen,
        d_mask,
        numberOfPixels,
        // Output
        d_before_sharpen_log);
    // Output the log image for checking
    checkCudaErrors(hipMemcpy(h_before_sharpen_log, d_before_sharpen_log, numberOfPixels*sizeof(float), hipMemcpyDeviceToHost));
    // Run the function under test on the log image
    sharpenImage(
        // Input
        d_before_sharpen_log, // image before sharpening. Does not set const here because the min/max reduction has to set the background value.
        d_mask, // the input mask
        numberOfPixels, // number of pixel within the image
        param.NumberOfHistogramBins, // number of histogram bin
        param.paddedHistogramSize, // number of histogram bin after padded
        param.histogramOffset, // histogram offset
        param.WienerFilterNoise, // when building the wiener filter, the tiny constant at the denominator to prevent divide by 0
        param.BiasFieldFullWidthAtHalfMaximum, // gaussian filter width
        data.h_V, // real values
        data.h_F,
        data.h_U,
        data.h_numerator,
        data.h_denominator,
        data.h_E,
        data.h_Vf, // complex values
        data.h_Ff,
        data.h_Uf,
        data.h_numeratorf,
        data.h_denominatorf,
        data.pf_v, // FFTW plans
        data.pf_f,
        data.pf_numerator,
        data.pf_denominator,
        data.pb_u,
        data.pb_numerator,
        data.pb_denominator,
        // Output
        d_after_sharpen // Image after sharpening
        );
    checkCudaErrors(hipMemcpy(h_after_sharpen, d_after_sharpen, numberOfPixels*sizeof(float), hipMemcpyDeviceToHost));
    // Clean up
    checkCudaErrors(hipFree(d_before_sharpen));
    checkCudaErrors(hipFree(d_before_sharpen_log));
    checkCudaErrors(hipFree(d_mask));
    checkCudaErrors(hipFree(d_after_sharpen));
}
// Computes the convergence measure between two successive log bias fields:
// the coefficient of variation (stddev / mean) of exp(field1 - field2) over
// the foreground (mask != 0) region.
void calculateConvergenceMeasurement(
    // Input
    const float * d_field1,
    const float * d_field2,
    const float * d_mask,
    const unsigned int numberOfPixels,
    const float numberOfForeground,
    // output
    float & convergence){
    dim3 block(512, 1, 1);
    dim3 grid( (numberOfPixels+511)/512, 1, 1);
    float * d_ratio;   // per-pixel exp(field1 - field2)
    float * d_scratch; // workspace for the sum reductions
    checkCudaErrors(hipMalloc((void **)&d_ratio, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_scratch, numberOfPixels*sizeof(float)));
    // ratio = exp(field1 - field2)
    hipLaunchKernelGGL(( subtract_and_exp_kernel), dim3(grid), dim3(block) , 0, 0,
        // Input
        d_field1,
        d_field2,
        // Output
        numberOfPixels,
        d_ratio);
    // Foreground mean: zero the background so it drops out of the sum.
    hipLaunchKernelGGL(( set_background_kernel), dim3(grid), dim3(block) , 0, 0, d_mask, 0, numberOfPixels, d_ratio);
    float mean = Reducer::reduce_sum_wrapper(numberOfPixels, d_ratio, d_scratch) / numberOfForeground;
    // Replace each pixel with (X - mean)^2 in place.
    hipLaunchKernelGGL(( subtract_mean_and_sqr_kernel), dim3(grid), dim3(block) , 0, 0,
        d_ratio,
        mean,
        numberOfPixels);
    // Zero the background again (the squared-difference kernel touched it),
    // then form the sample variance with the N-1 denominator.
    hipLaunchKernelGGL(( set_background_kernel), dim3(grid), dim3(block) , 0, 0, d_mask, 0, numberOfPixels, d_ratio);
    float variance = Reducer::reduce_sum_wrapper(numberOfPixels, d_ratio, d_scratch) / (numberOfForeground-1);
    // coefficient of variation
    convergence = sqrt(variance) / mean;
    checkCudaErrors(hipFree(d_ratio));
    checkCudaErrors(hipFree(d_scratch));
}
// This function does the upsampling of the lattice on the CPU.
// Input is (ncpt_x, ncpt_y, ncpt_z) control points; output is
// (n-3)*2 + 3 control points per axis. As suggested by the B-spline paper,
// this always refines a lattice of size n+3 to 2n+3.
void upsampleLattice3D_cpu(
    // Input
    const float * h_lattice,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    // Output
    float * h_lattice_upsample){
    // B-spline subdivision weights: row 0 produces the fine point coincident
    // with a coarse point, row 1 the fine point halfway between coarse points.
    float bw[2][3] = {
        {1./8, 6./8, 1./8},
        {0, 1./2, 1./2}
    };
    // BUGFIX: these were `unsigned int`. The loops below start at -1, and a
    // signed/unsigned comparison such as `i < nx+2` promotes -1 to a huge
    // unsigned value, so the loops never executed; the min/max clamping was
    // similarly wrong for negative indices. Work in signed ints throughout.
    int nx = (int)ncpt_x - 3;
    int ny = (int)ncpt_y - 3;
    int nz = (int)ncpt_z - 3;
    // Extents of the upsampled lattice (x extent not needed for indexing)
    int n2y = 2*ny + 3;
    int n2z = 2*nz + 3;
    int cy = (int)ncpt_y; // signed copies for index arithmetic
    int cz = (int)ncpt_z;
    float lattice_piece[27];
    // Loop through each point within the low resolution lattice
    for (int i=-1; i<nx+2; i++){
        for (int j=-1; j<ny+2; j++){
            for (int k=-1; k<nz+2; k++){
                // Each coarse point produces its neighboring 2x2x2 fine points
                // as linear combinations of the surrounding 3x3x3 coarse piece.
                // Gather the (edge-clamped) piece; i/j/kl run (i-1)..(i+1).
                for (int il = i-1; il<i+2; il++){
                    for (int jl = j-1; jl<j+2; jl++){
                        for (int kl = k-1; kl<k+2; kl++){
                            // local piece indices 0..2
                            int ilp = il-i+1;
                            int jlp = jl-j+1;
                            int klp = kl-k+1;
                            // clamp to the valid lattice range [-1, n+1],
                            // then shift by +1 into array coordinates
                            int ilc = (il < -1 ? -1 : (il > nx+1 ? nx+1 : il)) + 1;
                            int jlc = (jl < -1 ? -1 : (jl > ny+1 ? ny+1 : jl)) + 1;
                            int klc = (kl < -1 ? -1 : (kl > nz+1 ? nz+1 : kl)) + 1;
                            lattice_piece[ilp*9 + jlp * 3 + klp] =
                                h_lattice[ ilc * cy * cz + jlc * cz + klc];
                        }
                    }
                }
                // Accumulate the 8 fine points owned by this coarse point
                for (int i2=0; i2<2; i2++){
                    for (int j2=0; j2<2; j2++){
                        for (int k2=0; k2<2; k2++){
                            int i2l = 2*i + i2;
                            int j2l = 2*j + j2;
                            int k2l = 2*k + k2;
                            // skip fine points that fall outside the
                            // upsampled lattice range [-1, 2n+1]
                            if ((i2l>=-1) && (i2l<=2*nx+1) &&
                                (j2l>=-1) && (j2l<=2*ny+1) &&
                                (k2l>=-1) && (k2l<=2*nz+1)
                                ){
                                // Each element is a sum of 27 weighted terms.
                                float ls = 0.;
                                for (int bx=0; bx<3; bx++){
                                    for (int by=0; by<3; by++){
                                        for (int bz=0; bz<3; bz++){
                                            ls+=bw[i2][bx]*bw[j2][by]*bw[k2][bz] * lattice_piece[bx * 9 + by * 3 + bz];
                                        }
                                    }
                                }
                                // Put the summed element into the array
                                h_lattice_upsample[(i2l+1) * n2y * n2z +
                                                   (j2l+1) * n2z +
                                                   k2l+1] = ls;
                            }
                        }
                    }
                }
            }
        }
    }
}
// GPU version of the lattice refinement: a lattice with n+3 control points
// per axis becomes 2n+3. One block per coarse control point; the 2x2x2
// threads of a block each produce one fine control point.
void upsampleLattice3D_gpu(
    // Input
    const float * d_lattice,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    // Output
    float * d_lattice_upsample){
    const unsigned int base_x = ncpt_x - 3; // "n" per axis
    const unsigned int base_y = ncpt_y - 3;
    const unsigned int base_z = ncpt_z - 3;
    const unsigned int up_x = 2*base_x + 3; // upsampled lattice extents
    const unsigned int up_y = 2*base_y + 3;
    const unsigned int up_z = 2*base_z + 3;
    dim3 grid(ncpt_x, ncpt_y, ncpt_z);
    dim3 block(2, 2, 2);
    hipLaunchKernelGGL(( upsample_lattice_kernel_3D), dim3(grid), dim3(block) , 0, 0,
        // Input
        ncpt_x, // number of control points
        ncpt_y,
        ncpt_z,
        base_x, // ncpt - 3, the base number
        base_y,
        base_z,
        up_x, // number of control points on next level
        up_y,
        up_z,
        d_lattice,
        // Output
        d_lattice_upsample
        );
}
// Host test harness for upsampleLattice3D_gpu: uploads the coarse lattice,
// runs the GPU refinement, and downloads the refined lattice.
void testUpsampleLattice3D_gpu(
    // Input
    const float * h_lattice,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    // Output
    float * h_lattice_upsample){
    // Coarse and refined lattice element counts; refinement maps n+3 -> 2n+3
    // control points per axis.
    const unsigned int n_coarse = ncpt_x * ncpt_y * ncpt_z;
    const unsigned int ncpt_x_up = (ncpt_x - 3) * 2 + 3;
    const unsigned int ncpt_y_up = (ncpt_y - 3) * 2 + 3;
    const unsigned int ncpt_z_up = (ncpt_z - 3) * 2 + 3;
    const unsigned int n_fine = ncpt_x_up * ncpt_y_up * ncpt_z_up;
    float * d_lattice;
    float * d_lattice_upsample;
    checkCudaErrors(hipMalloc((void **)&d_lattice, n_coarse*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_lattice_upsample, n_fine*sizeof(float)));
    checkCudaErrors(hipMemcpy(d_lattice, h_lattice, n_coarse*sizeof(float), hipMemcpyHostToDevice));
    upsampleLattice3D_gpu(
        // Input
        d_lattice,
        ncpt_x, // number of control points
        ncpt_y,
        ncpt_z,
        // Output
        d_lattice_upsample);
    checkCudaErrors(hipMemcpy(h_lattice_upsample, d_lattice_upsample, n_fine*sizeof(float), hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_lattice));
    checkCudaErrors(hipFree(d_lattice_upsample));
}
// Evaluates the cubic B-spline defined by the control-point lattice over a
// fx*fy*fz field. Blocks are laid out so that every block lies entirely
// inside one span, which lets the kernel reuse the same 4x4x4 control points
// for the whole block (same scheme as the fitting kernel, with read/write
// directions reversed).
void EvaluateBspline3D(
    // Input
    const float * d_lattice,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    const unsigned int fx, // size of the output field
    const unsigned int fy,
    const unsigned int fz,
    // Output
    float * d_fitted){
    const unsigned int spans_x = ncpt_x - 3;
    const unsigned int spans_y = ncpt_y - 3;
    const unsigned int spans_z = ncpt_z - 3;
    // Span length in pixels along each axis (may be fractional).
    const float span_x = (float)fx / spans_x;
    const float span_y = (float)fy / spans_y;
    const float span_z = (float)fz / spans_z;
    // Block edge is the span size capped at 6 threads.
    // NOTE: an 8x8x8 block on GTX 1080 with driver 418 silently failed to
    // launch; 6x6x6 works, hence the cap of 6 instead of 8.
    const int tpb_x = (int)ceil(fmin(span_x, 6.f));
    const int tpb_y = (int)ceil(fmin(span_y, 6.f));
    const int tpb_z = (int)ceil(fmin(span_z, 6.f));
    // Enough blocks to tile one span along each axis.
    const int bps_x = (int)ceil(span_x / (float)tpb_x);
    const int bps_y = (int)ceil(span_y / (float)tpb_y);
    const int bps_z = (int)ceil(span_z / (float)tpb_z);
    dim3 block(tpb_x, tpb_y, tpb_z);
    dim3 grid(spans_x * bps_x, spans_y * bps_y, spans_z * bps_z);
    hipLaunchKernelGGL(( evaluate_bspline_kernel_3D), dim3(grid), dim3(block) , 0, 0,
        // Input
        d_lattice,
        ncpt_x, // number of control points
        ncpt_y,
        ncpt_z,
        bps_x, // How many blocks there are per span
        bps_y,
        bps_z,
        span_x, // How long each span is
        span_y,
        span_z,
        fx, // What is the output field size
        fy,
        fz,
        // Output
        d_fitted
        );
}
// Host test harness for EvaluateBspline3D: uploads the lattice, zeroes the
// output field, runs the evaluation, and downloads the fitted field.
void testEvaluateBspline3D(// Input
    const float * h_lattice,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    const unsigned int fx, // size of the output field
    const unsigned int fy,
    const unsigned int fz,
    // Output
    float * h_fitted){
    float * d_lattice;
    float * d_fitted;
    unsigned int numberOfLattice = ncpt_x * ncpt_y * ncpt_z;
    unsigned int numberOfPixels = fx * fy * fz;
    checkCudaErrors(hipMalloc((void **)&d_lattice, numberOfLattice*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_fitted, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMemcpy(d_lattice, h_lattice, numberOfLattice*sizeof(float), hipMemcpyHostToDevice));
    // BUGFIX: d_fitted holds numberOfPixels floats, but only
    // numberOfLattice*sizeof(float) bytes were being cleared, leaving most of
    // the output field uninitialized before the kernel ran.
    checkCudaErrors(hipMemset(d_fitted, 0, numberOfPixels*sizeof(float)));
    EvaluateBspline3D(
        // Input
        d_lattice,
        ncpt_x, // number of control points
        ncpt_y,
        ncpt_z,
        fx, // size of the output field
        fy,
        fz,
        // Output
        d_fitted);
    checkCudaErrors(hipMemcpy(h_fitted, d_fitted, numberOfPixels*sizeof(float), hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_lattice));
    checkCudaErrors(hipFree(d_fitted));
}
// Given a field and a mask, fits a B-spline to the field following the basic
// algorithm from "Scattered Data Interpolation with Multilevel B-Splines":
// accumulate wc^2*phi_c and wc^2 per control point, then divide.
// The block/thread/shared-memory layout is exactly the same as
// EvaluateBspline3D except read/write are reversed.
void FitBspline3D(
    // Input
    const float * d_field,
    const float * d_mask,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    const unsigned int fx, // size of the output field
    const unsigned int fy,
    const unsigned int fz,
    // Output
    float * d_lattice
    ){
    // Numerator (wc^2 * phi_c) and denominator (wc^2) accumulators, zeroed
    // because the kernel accumulates into them with atomics.
    // CLEANUP: a third buffer (d_buffer) used to be allocated and freed here
    // without ever being used; it has been removed.
    unsigned int n_lattice = ncpt_x * ncpt_y * ncpt_z;
    float * d_wc2_phic;
    float * d_wc2;
    checkCudaErrors(hipMalloc((void **)&d_wc2_phic, n_lattice*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_wc2, n_lattice*sizeof(float)));
    checkCudaErrors(hipMemset(d_wc2_phic, 0, n_lattice*sizeof(float)));
    checkCudaErrors(hipMemset(d_wc2, 0, n_lattice*sizeof(float)));
    // Same block/thread/shared memory layout as the EvaluateBspline3D.
    unsigned int numberOfSpansX = ncpt_x - 3;
    unsigned int numberOfSpansY = ncpt_y - 3;
    unsigned int numberOfSpansZ = ncpt_z - 3;
    float span_x = 1.0*fx / numberOfSpansX;
    float span_y = 1.0*fy / numberOfSpansY;
    float span_z = 1.0*fz / numberOfSpansZ;
    // If the span is smaller than 6, use the span size for each block.
    // (8x8x8 blocks silently failed to launch on GTX 1080/driver 418 — see
    // EvaluateBspline3D — hence the cap of 6.)
    int thread_x = ceil(fmin(span_x, 6.f));
    int thread_y = ceil(fmin(span_y, 6.f));
    int thread_z = ceil(fmin(span_z, 6.f));
    int blocks_per_span_x = ceil(span_x / thread_x);
    int blocks_per_span_y = ceil(span_y / thread_y);
    int blocks_per_span_z = ceil(span_z / thread_z);
    int num_block_x = numberOfSpansX * blocks_per_span_x;
    int num_block_y = numberOfSpansY * blocks_per_span_y;
    int num_block_z = numberOfSpansZ * blocks_per_span_z;
    dim3 threads_3d(thread_x, thread_y, thread_z);
    dim3 blocks_3d(num_block_x , num_block_y, num_block_z);
    // accumulate wc2_phi and wc2
    hipLaunchKernelGGL(( accumulate_WC2phic_and_WC2), dim3(blocks_3d), dim3(threads_3d) , 0, 0,
        // Input
        d_field,
        d_mask,
        ncpt_x, // number of control points
        ncpt_y,
        ncpt_z,
        blocks_per_span_x, // How many block is there per span
        blocks_per_span_y,
        blocks_per_span_z,
        span_x, // How long each span is
        span_y,
        span_z,
        fx, // What is the input field size
        fy,
        fz,
        // Output
        d_wc2_phic,
        d_wc2
        );
    // lattice = wc2_phic / d_wc2, elementwise over the control points
    dim3 threads_1d(512, 1, 1);
    dim3 blocks_1d((n_lattice+511)/512, 1, 1);
    hipLaunchKernelGGL(( divide_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
        // Input
        d_wc2_phic,
        d_wc2,
        n_lattice,
        // Output
        d_lattice);
    // clean up
    checkCudaErrors(hipFree(d_wc2_phic));
    checkCudaErrors(hipFree(d_wc2));
}
// Host test harness for FitBspline3D: uploads the field and mask, fits the
// B-spline lattice on the GPU, and downloads the resulting lattice.
void testFitBspline3D(
    // Input
    const float * h_field,
    const float * h_mask,
    const unsigned int ncpt_x, // number of control points
    const unsigned int ncpt_y,
    const unsigned int ncpt_z,
    const unsigned int fx, // size of the output field
    const unsigned int fy,
    const unsigned int fz,
    // Output
    float * h_lattice
    ){
    const unsigned int n_lat = ncpt_x * ncpt_y * ncpt_z; // lattice elements
    const unsigned int n_pix = fx * fy * fz;             // field elements
    float * d_field;
    float * d_mask;
    float * d_lattice;
    checkCudaErrors(hipMalloc((void **)&d_field, n_pix*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_mask, n_pix*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_lattice, n_lat*sizeof(float)));
    checkCudaErrors(hipMemcpy(d_field, h_field, n_pix*sizeof(float), hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_mask, h_mask, n_pix*sizeof(float), hipMemcpyHostToDevice));
    FitBspline3D(
        // Input
        d_field,
        d_mask,
        ncpt_x, // number of control points
        ncpt_y,
        ncpt_z,
        fx, // size of the output field
        fy,
        fz,
        // Output
        d_lattice
        );
    checkCudaErrors(hipMemcpy(h_lattice, d_lattice, n_lat*sizeof(float), hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_field));
    checkCudaErrors(hipFree(d_mask));
    checkCudaErrors(hipFree(d_lattice));
}
// The overall N4 bias-field-correction driver.
// Per level: repeatedly sharpen the log image, fit a B-spline to the
// residual log bias field, accumulate it into the lattice, evaluate the
// smoothed field, and check convergence; between levels the lattice is
// upsampled (n+3 -> 2n+3 control points per axis).
void N4(
    // Input
    float * d_im, // The input image
    float * d_mask, // The mask. Right now only binary masks are supported.
    const unsigned int fx, // What is the input field size
    const unsigned int fy,
    const unsigned int fz,
    N4Param & param,
    N4Data & data, // Some additional data structure needed to run the function,
                   // mostly fft stuff needed for fftw.
    // Output
    float * d_im_normalized, // The image after normalization
    float * d_lattice, // The resulting lattice representing the bias field.
                       // Size of this should be equal to 2^(number of level-1) * (initial_point-3) + 3
    // Optional. Also output the bias field
    float * d_biasField
    ){
    unsigned int numberOfPixels = fx * fy * fz;
    // Number of control points at the current fitting level
    unsigned int ncpt_x = param.NumberOfControlPoints_x;
    unsigned int ncpt_y = param.NumberOfControlPoints_y;
    unsigned int ncpt_z = param.NumberOfControlPoints_z;
    unsigned int numberOfLattice = ncpt_x * ncpt_y * ncpt_z;
    unsigned int ncpt_x_n; // number of control points on next level
    unsigned int ncpt_y_n;
    unsigned int ncpt_z_n;
    unsigned int numberOfLattice_n;
    float CurrentConvergenceMeasurement;
    unsigned int elapsedIterations;
    dim3 threads_1d(512, 1, 1);
    dim3 blocks_1d((numberOfPixels+511)/512, 1, 1);
    // Set the mask = 0 and im = 0 at the points where im <= low_value
    hipLaunchKernelGGL(( lowthreshold), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
        param.low_value,
        numberOfPixels,
        d_im,
        d_mask);
    // CLEANUP: an unused host buffer (h_buffer) and unused spacing/size
    // locals were removed here.
    float * d_im_log;              // masked log of the input image
    float * d_logUncorrectedImage; // log image minus the current bias field
    float * d_logSharpenedImage;   // histogram-sharpened log image
    float * d_residualBiasField;   // logUncorrected - logSharpened
    float * d_logBiasField;        // current smoothed log bias field
    float * d_newLogBiasField;     // candidate bias field for this iteration
    float * d_buffer;              // reduction scratch
    float * d_temp;                // for pointer swaps
    float * d_lattice_c_residual;  // current level lattice fitted to the residual
    float * d_lattice_c;           // current level lattice accumulated
    float * d_lattice_n;           // next level lattice
    // Assign space
    checkCudaErrors(hipMalloc((void **)&d_im_log, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_logUncorrectedImage, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_logSharpenedImage, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_residualBiasField, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_logBiasField, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_newLogBiasField, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_buffer, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMemset(d_im_log, 0, numberOfPixels*sizeof(float)));
    checkCudaErrors(hipMemset(d_logBiasField, 0, numberOfPixels*sizeof(float))); // bias field starts at 0
    checkCudaErrors(hipMemset(d_buffer, 0, numberOfPixels*sizeof(float)));
    // Masked log transform of the input image
    hipLaunchKernelGGL(( log_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
        // Input
        d_im,
        d_mask,
        numberOfPixels,
        // Output
        d_im_log);
    // Foreground voxel count (mask is binary, so the sum is the count)
    float numberOfForeground = Reducer::reduce_sum_wrapper(numberOfPixels, d_mask, d_buffer);
    checkCudaErrors(hipMemcpy(d_logUncorrectedImage, d_im_log, numberOfPixels*sizeof(float), hipMemcpyDeviceToDevice));
    // assign lattice space for the first level
    checkCudaErrors(hipMalloc((void **)&d_lattice_c, numberOfLattice*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_lattice_c_residual, numberOfLattice*sizeof(float)));
    checkCudaErrors(hipMemset(d_lattice_c, 0, numberOfLattice*sizeof(float))); // Init the initial lattice to 0.
    for (int currentLevel = 0; currentLevel<param.NumberOfFittingLevels; currentLevel++){
        // Reset the convergence tracking for this level.
        CurrentConvergenceMeasurement = 10000000000.0;
        elapsedIterations = 0;
        dim3 threads_1d_lattice(512, 1, 1);
        dim3 blocks_1d_lattice((numberOfLattice+511)/512, 1, 1);
        while ((CurrentConvergenceMeasurement > param.ConvergenceThreshold) &&
               (elapsedIterations<param.MaximumNumberOfIterations)){
            printf("Level %d, iter %d\n", currentLevel, elapsedIterations);
            elapsedIterations++;
            // Sharpen the image
            sharpenImage(
                // Input
                d_logUncorrectedImage, // image before sharpening. Does not set const here because the min/max reduction has to set the background value.
                d_mask, // the input mask
                numberOfPixels, // number of pixel within the image
                param.NumberOfHistogramBins, // number of histogram bin
                param.paddedHistogramSize, // number of histogram bin after padded
                param.histogramOffset, // histogram offset
                param.WienerFilterNoise, // when building the wiener filter, the tiny constant at the denominator to prevent divide by 0
                param.BiasFieldFullWidthAtHalfMaximum, // gaussian filter width
                data.h_V, // real values
                data.h_F,
                data.h_U,
                data.h_numerator,
                data.h_denominator,
                data.h_E,
                data.h_Vf, // complex values
                data.h_Ff,
                data.h_Uf,
                data.h_numeratorf,
                data.h_denominatorf,
                data.pf_v, // FFTW plans
                data.pf_f,
                data.pf_numerator,
                data.pf_denominator,
                data.pb_u,
                data.pb_numerator,
                data.pb_denominator,
                // Output
                d_logSharpenedImage // Image after sharpening
                );
            // Benchmark code - Start
            hipEvent_t start, stop;
            hipEventCreate(&start);
            hipEventCreate(&stop);
            hipEventRecord(start);
            // residual = logUncorrected - logSharpened
            hipLaunchKernelGGL(( subtract), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
                // Input
                d_logUncorrectedImage,
                d_logSharpenedImage,
                numberOfPixels,
                // Output
                d_residualBiasField);
            // Fit a new Bspline lattice to the current residual field
            FitBspline3D(
                // Input
                d_residualBiasField,
                d_mask,
                ncpt_x, // number of control points
                ncpt_y,
                ncpt_z,
                fx, // size of the output field
                fy,
                fz,
                // Output
                d_lattice_c_residual
                );
            // Accumulate the residual lattice into the overall lattice
            hipLaunchKernelGGL(( sum_inplace_kernel), dim3(blocks_1d_lattice), dim3(threads_1d_lattice) , 0, 0,
                // Input
                d_lattice_c, // output
                d_lattice_c_residual,
                numberOfLattice);
            // Evaluate the accumulated lattice -> candidate bias field
            EvaluateBspline3D(
                // Input
                d_lattice_c,
                ncpt_x, // number of control points
                ncpt_y,
                ncpt_z,
                fx, // size of the output field
                fy,
                fz,
                // Output
                d_newLogBiasField);
            // Benchmark code - End
            hipEventRecord(stop);
            hipEventSynchronize(stop);
            float milliseconds = 0;
            hipEventElapsedTime(&milliseconds, start, stop);
            // BUGFIX: the events were created each iteration but never
            // destroyed, leaking driver resources over the run.
            hipEventDestroy(start);
            hipEventDestroy(stop);
            printf("Smooth image takes %f ms.\n", milliseconds);
            // Convergence between the old and the new bias field.
            // BUGFIX: this previously wrote its result into
            // param.ConvergenceThreshold, which both clobbered the
            // user-supplied threshold and left CurrentConvergenceMeasurement
            // stuck at its init value, so the while-loop convergence test
            // never triggered and every level ran MaximumNumberOfIterations.
            calculateConvergenceMeasurement(
                // Input
                d_logBiasField,
                d_newLogBiasField,
                d_mask,
                numberOfPixels,
                numberOfForeground,
                // output
                CurrentConvergenceMeasurement);
            // Update the logBiasField. Use pointer swap here to save time on copying data.
            d_temp = d_logBiasField;
            d_logBiasField = d_newLogBiasField;
            d_newLogBiasField = d_temp;
            // Update logUncorrectedImage = log image - bias field
            hipLaunchKernelGGL(( subtract), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
                // Input
                d_im_log,
                d_logBiasField,
                numberOfPixels,
                // Output
                d_logUncorrectedImage);
        }
        // Upsample the lattice if not the last level
        if (currentLevel!=param.NumberOfFittingLevels-1){
            // number of control points on the next level: n+3 -> 2n+3
            ncpt_x_n = (ncpt_x - 3) * 2 + 3;
            ncpt_y_n = (ncpt_y - 3) * 2 + 3;
            ncpt_z_n = (ncpt_z - 3) * 2 + 3;
            numberOfLattice_n = ncpt_x_n * ncpt_y_n * ncpt_z_n;
            // Assign memory for the next level lattice
            checkCudaErrors(hipMalloc((void **)&d_lattice_n, numberOfLattice_n*sizeof(float)));
            // Upsample
            upsampleLattice3D_gpu(
                // Input
                d_lattice_c,
                ncpt_x, // number of control points
                ncpt_y,
                ncpt_z,
                // Output
                d_lattice_n);
            // Free up the current lattice and pointer swap to next
            checkCudaErrors(hipFree(d_lattice_c));
            checkCudaErrors(hipFree(d_lattice_c_residual));
            d_lattice_c = d_lattice_n;
            // lattice_residual for next level
            checkCudaErrors(hipMalloc((void **)&d_lattice_c_residual, numberOfLattice_n*sizeof(float)));
            // Update number of control points to next level
            ncpt_x = ncpt_x_n;
            ncpt_y = ncpt_y_n;
            ncpt_z = ncpt_z_n;
            numberOfLattice = numberOfLattice_n;
        }
    }
    // Final normalization: evaluate the finest lattice into the bias field
    EvaluateBspline3D(
        // Input
        d_lattice_c,
        ncpt_x, // number of control points
        ncpt_y,
        ncpt_z,
        fx, // size of the output field
        fy,
        fz,
        // Output
        d_logBiasField);
    // normalized image = im / exp(logBiasField)
    hipLaunchKernelGGL(( exp_and_divide_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
        // Input
        d_logBiasField,
        d_im,
        numberOfPixels,
        // output
        d_im_normalized);
    // Output the bias field if requested
    if (d_biasField != NULL){
        hipLaunchKernelGGL(( exp_kernel), dim3(blocks_1d), dim3(threads_1d) , 0, 0,
            // Input
            d_logBiasField,
            numberOfPixels,
            // output
            d_biasField);
    }
    // Save the lattice to output
    checkCudaErrors(hipMemcpy(d_lattice, d_lattice_c, numberOfLattice*sizeof(float), hipMemcpyDeviceToDevice));
    // clean up
    checkCudaErrors(hipFree(d_im_log));
    checkCudaErrors(hipFree(d_logUncorrectedImage));
    checkCudaErrors(hipFree(d_logSharpenedImage));
    checkCudaErrors(hipFree(d_residualBiasField));
    checkCudaErrors(hipFree(d_logBiasField));
    checkCudaErrors(hipFree(d_newLogBiasField));
    checkCudaErrors(hipFree(d_buffer));
    checkCudaErrors(hipFree(d_lattice_c));
    checkCudaErrors(hipFree(d_lattice_c_residual));
}
// Host-side driver for N4: moves the image and mask onto the device, runs the
// full N4 bias-field correction, and copies the results back to host memory.
// The optional bias-field output is only produced when h_biasField is non-NULL.
void testN4(
	// Input
	float * h_im, // The input image
	float * h_mask, // The mask. Right now only binary mask are supported.
	const unsigned int fx, // What is the input field size
	const unsigned int fy,
	const unsigned int fz,
	N4Param & param,
	N4Data & data, // Some additional data structure needed to run the function,
                 // mostly are fft stuff needed for fftw.
	// Output
	float * h_im_normalized, // The image after normalization
	float * h_lattice, // The resulting lattice representing the bias field.
                     // Size of this should be equal to 2^(number of level-1) * (initial_point-3) + 3
	// Optional. Also output the bias field
	float * h_biasField
	){
	// Number of control points at the finest fitting level: each level refines
	// the lattice via ncpt -> (ncpt - 3) * 2 + 3.
	unsigned int ncpt_x = param.NumberOfControlPoints_x;
	unsigned int ncpt_y = param.NumberOfControlPoints_y;
	unsigned int ncpt_z = param.NumberOfControlPoints_z;
	for (unsigned int i=0; i< param.NumberOfFittingLevels-1; i++){
		ncpt_x = (ncpt_x - 3) * 2 + 3;
		ncpt_y = (ncpt_y - 3) * 2 + 3;
		ncpt_z = (ncpt_z - 3) * 2 + 3;
	}
	const unsigned int numberOfLattice = ncpt_x * ncpt_y * ncpt_z;
	const unsigned int numberOfPixels = fx * fy * fz;
	printf("Test lattice size: %d\n", numberOfLattice);
	// Device-side buffers.
	float * d_im;
	float * d_mask;
	float * d_im_normalized;
	float * d_lattice;
	float * d_biasField = NULL;
	checkCudaErrors(hipMalloc((void **)&d_im, numberOfPixels*sizeof(float)));
	checkCudaErrors(hipMalloc((void **)&d_mask, numberOfPixels*sizeof(float)));
	checkCudaErrors(hipMalloc((void **)&d_im_normalized, numberOfPixels*sizeof(float)));
	checkCudaErrors(hipMalloc((void **)&d_lattice, numberOfLattice*sizeof(float)));
	// Optional. Also allocate the bias-field output buffer.
	if (h_biasField != NULL){
		checkCudaErrors(hipMalloc((void **)&d_biasField, numberOfPixels*sizeof(float)));
	}
	// Upload the inputs.
	checkCudaErrors(hipMemcpy(d_im, h_im, numberOfPixels*sizeof(float), hipMemcpyHostToDevice));
	checkCudaErrors(hipMemcpy(d_mask, h_mask, numberOfPixels*sizeof(float), hipMemcpyHostToDevice));
	// Run the correction entirely on the device.
	N4(
		// Input
		d_im, // The input image
		d_mask, // The mask. Right now only binary mask are supported.
		fx, // What is the input field size
		fy,
		fz,
		param,
		data, // Some additional data structure needed to run the function,
		      // mostly are fft stuff needed for fftw.
		// Output
		d_im_normalized, // The image after normalization
		d_lattice, // The resulting lattice representing the bias field.
		           // Size of this should be equal to 2^(number of level-1) * (initial_point-3) + 3
		// Optional. Also output the bias field
		d_biasField
	);
	// Download the results.
	checkCudaErrors(hipMemcpy(h_im_normalized, d_im_normalized, numberOfPixels*sizeof(float), hipMemcpyDeviceToHost));
	checkCudaErrors(hipMemcpy(h_lattice, d_lattice, numberOfLattice*sizeof(float), hipMemcpyDeviceToHost));
	// Optional. Also download the bias field.
	if (h_biasField != NULL){
		checkCudaErrors(hipMemcpy(h_biasField, d_biasField, numberOfPixels*sizeof(float), hipMemcpyDeviceToHost));
		checkCudaErrors(hipFree(d_biasField));
	}
	// Clean up.
	checkCudaErrors(hipFree(d_im));
	checkCudaErrors(hipFree(d_mask));
	checkCudaErrors(hipFree(d_im_normalized));
	checkCudaErrors(hipFree(d_lattice));
}
| d8142accd211efbc68728e8c1376e28c7a4a6ac5.cu | /* CUDA implementation of the N4 algorithm. */
#include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm> // std::min / std::max in host-side lattice code
#include <math.h>
#include <string.h> // memset
#include <complex>
// Without this the complex number multiplication/division doesn't work.
#include <fftw3.h>
#include "helper_functions.h"
#include "helper_cuda.h"
#include "reducer.h"
#include "cudaN4.h"
// Overwrites every element of `data` whose corresponding mask entry is zero
// with `background_value`; foreground elements (mask != 0) are left untouched.
__global__ void set_background_kernel(
	// Input
	const float * mask,
	const float background_value,
	const unsigned int n,
	// Output
	float * data){
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < n && mask[idx] == 0){
		data[idx] = background_value;
	}
}
// Element-wise in-place accumulation: a[i] += b[i] for every i < n.
__global__ void sum_inplace_kernel(
	// Input
	float * a, // output
	const float * b,
	const unsigned int n){
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < n){
		a[idx] = a[idx] + b[idx];
	}
}
// Element-wise difference: out[i] = a[i] - b[i].
__global__ void subtract(
	// Input
	const float * a,
	const float * b,
	const unsigned int n,
	// Output
	float * out){
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < n){
		out[idx] = a[idx] - b[idx];
	}
}
// Element-wise exponential: biasField[i] = exp(logBiasField[i]).
// Fix: uses the single-precision expf() so the computation stays in float;
// the previous exp() promoted every pixel to double.
__global__ void exp_kernel(
	// Input
	const float * logBiasField,
	const unsigned int n,
	// output
	float * biasField){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i>=n)
		return;
	biasField[i] = expf(logBiasField[i]);
}
// Bias removal: im_normalized[i] = im[i] / exp(logBiasField[i]).
// Fix: uses the single-precision expf() so the division stays in float;
// the previous exp() promoted every pixel to double.
__global__ void exp_and_divide_kernel(
	// Input
	const float * logBiasField,
	const float * im,
	const unsigned int n,
	// output
	float * im_normalized){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i>=n)
		return;
	im_normalized[i] = im[i]/expf(logBiasField[i]);
}
// Element-wise ratio of two log-domain fields: out[i] = exp(a[i] - b[i]).
// Used when computing the bias field convergence measure.
// Fixes: single-precision expf() instead of double-promoting exp(), and the
// parameter comments now correctly label `n` as an input.
__global__ void subtract_and_exp_kernel(
	// Input
	const float * a,
	const float * b,
	const unsigned int n,
	// Output
	float * out){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i>=n)
		return;
	out[i]= expf(a[i] - b[i]);
}
// Element-wise squared deviation, in place: a[i] = (a[i] - mean)^2.
__global__ void subtract_mean_and_sqr_kernel(
	float * a,
	const float mean,
	const unsigned int n){
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= n)
		return;
	const float dev = a[idx] - mean;
	a[idx] = dev * dev;
}
// Element-wise safe division: c[i] = a[i] / b[i], with c[i] = 0 wherever b[i] == 0.
__global__ void divide_kernel(
	// Input
	const float * a,
	const float * b,
	const unsigned n,
	// Output
	float * c){
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= n)
		return;
	const float denom = b[idx];
	c[idx] = (denom != 0) ? (a[idx] / denom) : 0.0f;
}
// Element-wise logarithm restricted to the mask:
// im_log[i] = log(im[i]) where mask[i] != 0, and 0 elsewhere.
// Fix: uses the single-precision logf(); the previous log() promoted every
// pixel to double.
// NOTE(review): assumes masked-in pixels are strictly positive (lowthreshold
// zeroes the mask for small values first) so logf never sees <= 0 — confirm
// against callers.
__global__ void log_kernel(
	// Input
	const float * im,
	const float * mask,
	const unsigned int numberOfPixels,
	// Output
	float * im_log){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i>=numberOfPixels)
		return;
	if (mask[i]!=0){
		im_log[i] = logf(im[i]);
	}else{
		im_log[i] = 0;
	}
}
// histogramGPU computes the histogram of an input array on the GPU
// This function is taken from
// https://github.com/kevinzakka/learn-cuda/blob/master/src/histogram.cu
// Modified to support fractional bin as the N4 implementation did: a value
// falling between two bin centers contributes a fractional (triangular /
// Parzen-style) count to both neighboring bins.
//
// Launch requirements: 1-D grid covering numElems threads and
// numBin*sizeof(float) bytes of dynamic shared memory for the per-block
// private histogram. `bins` is accumulated with atomicAdd, so the caller
// must zero it beforehand (see sharpenImage's cudaMemset of d_histogram).
__global__ void histogramGPU_kernel(
	// Input
	const float * input,
	const float * mask, // voxels with mask == 0 are skipped
	const float in_min, // intensity mapped to bin 0
	const float bin_slope, // intensity width of one bin
	const unsigned int numElems,
	const unsigned int numBin,
	// Output
	float * bins){
	int tx = threadIdx.x;
	int bx = blockIdx.x;
	int BLOCK_SIZE = blockDim.x;
	// compute global thread coordinates
	int i = (bx * BLOCK_SIZE) + tx;
	// create a private histogram copy for each thread block
	// size same as numBin
	extern __shared__ float hist[];
	// each thread must initialize more than 1 location
	if (numBin > BLOCK_SIZE) {
		for (int j=tx; j<numBin; j+=BLOCK_SIZE) {
			hist[j] = 0.f;
		}
	}
	// use the first `PRIVATE` threads of each block to init
	else {
		if (tx < numBin) {
			hist[tx] = 0.f;
		}
	}
	// wait for all threads in the block to finish
	__syncthreads();
	// update private histogram given the mask value
	// this is safe due to short-circuit evaluation
	if ((i < numElems) && (mask[i]!=0.)) {
		// bin the input: cidx is the fractional bin coordinate
		float cidx =(input[i] - in_min)/bin_slope;
		int idx = (int)floor(cidx);
		float offset = cidx - (float)idx;
		if( offset == 0. ){
			// exactly on a bin center: whole count goes to one bin
			atomicAdd(&(hist[idx]), 1.);
		}
		else if( idx < numBin - 1 ){
			// split the unit count between the two neighboring bins
			atomicAdd(&(hist[idx]), 1. - offset);
			atomicAdd(&(hist[idx+1]), offset);
		}
	}
	// wait for all threads in the block to finish
	__syncthreads();
	// merge the block-private histogram into the global one;
	// each thread must update more than 1 location
	if (numBin > BLOCK_SIZE) {
		for (int j=tx; j<numBin; j+=BLOCK_SIZE) {
			atomicAdd(&(bins[j]), hist[j]);
		}
	}
	// use the first `PRIVATE` threads to update final histogram
	else {
		if (tx < numBin) {
			atomicAdd(&(bins[tx]), hist[tx]);
		}
	}
}
// Remaps each foreground pixel through the sharpened-histogram lookup table E
// using linear interpolation between adjacent table entries; the last bin is
// clamped. Background pixels (mask == 0) are left untouched.
// Fixes: fmaxf/floorf keep the index arithmetic in single precision
// (max(0., cidx) previously promoted to double).
__global__ void histogramMapping_kernel(
	// Input
	const float * before_sharpen,
	const float * mask,
	const float * E, // lookup table of length numBin
	const float binMinimum,
	const float histogramSlope,
	const unsigned int n, // num pixel in image
	const unsigned int numBin,
	// Output
	float * after_sharpen // Image after sharpening
	){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i>=n)
		return;
	if (mask[i]==0.)
		return;
	// Fractional bin coordinate of this pixel's intensity.
	float cidx = ( before_sharpen[i] - binMinimum ) / histogramSlope;
	cidx = fmaxf(0.0f, cidx); // In case numerical error sends cidx < 0
	unsigned int idx = floorf( cidx );
	float correctedPixel;
	if( idx < numBin - 1 ){
		// Linear interpolation between the two surrounding table entries.
		correctedPixel = E[idx] + ( E[idx + 1] - E[idx] ) * ( cidx - idx );
	} else {
		// Clamp to the last bin.
		correctedPixel = E[numBin - 1];
	}
	after_sharpen[i] = correctedPixel;
}
// This kernel has to be used with the upsampleLattice3D_gpu() with a fixed number of threads to produce the correct result.
// Launch layout (see upsampleLattice3D_gpu): grid = (ncpt_x, ncpt_y, ncpt_z),
// block = (2, 2, 2). Each block handles one coarse control point; its 2x2x2
// threads each produce one fine (upsampled) control point as a weighted sum
// of the coarse 3x3x3 neighborhood, which is staged in shared memory.
__global__ void upsample_lattice_kernel_3D(
	// Input
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	const unsigned int nx, // base span counts, ncpt - 3
	const unsigned int ny,
	const unsigned int nz,
	const unsigned int n2x, // upsampled lattice size, 2*n + 3
	const unsigned int n2y,
	const unsigned int n2z,
	const float * lattice,
	// Output
	float * lattice_upsample
	){
	// The index notation here is trying to match as closely as possible with the one on the numpy version.
	// blockIdx-1: -1:n+1, -1:m+1, -1:l+1
	// i: -1:n+1
	int i = (int)blockIdx.x-1;
	int j = (int)blockIdx.y-1;
	int k = (int)blockIdx.z-1;
	// Thread coordinates select which of the 2x2x2 fine points this thread produces.
	int xx = threadIdx.x;
	int yy = threadIdx.y;
	int zz = threadIdx.z;
	// Flattened thread index within the block, used for the cooperative load below.
	int tx = threadIdx.x * blockDim.y * blockDim.z + threadIdx.y * blockDim.z + threadIdx.z;
	// B-spline refinement weights: row 0 for even fine points, row 1 for odd.
	float bw[2][3] = {
		{1./8, 6./8, 1./8},
		{0, 1./2, 1./2}
	};
	// Coarse 3x3x3 neighborhood of this block's control point.
	__shared__ float lattice_piece[27];
	int BLOCK_SIZE = blockDim.x * blockDim.y * blockDim.z;
	// Each threads load lattice pieces into the shared lattice_piece
	for (int h=tx; h<27; h+=BLOCK_SIZE) {
		int Z = h % 3 + k -1;
		int Y = (h/3)%3 + j -1;
		int X = h / 9 + i -1; // i-1:i+1
		// clip the input lattice point to a constant boundary
		X = min(max(X, -1), (int)(nx+1))+1;
		Y = min(max(Y, -1), (int)(ny+1))+1;
		Z = min(max(Z, -1), (int)(nz+1))+1;
		lattice_piece[h] = lattice[ X * ncpt_y * ncpt_z + Y * ncpt_z + Z ];
	}
	__syncthreads();
	// Now, sum the 27 elements weighted by the weight.
	// (idx, idy, idz) is this thread's fine control-point index, range -1..2n+1.
	int idx = 2*i + xx;
	int idy = 2*j + yy;
	int idz = 2*k + zz;
	if( (idx>=-1) && (idx<=(int)(2*nx+1)) &&
		(idy>=-1) && (idy<=(int)(2*ny+1)) &&
		(idz>=-1) && (idz<=(int)(2*nz+1))){
		float ls = 0.;
		#pragma unroll
		for (int bx=0; bx<3; bx++){
			#pragma unroll
			for (int by=0; by<3; by++){
				#pragma unroll
				for (int bz=0; bz<3; bz++){
					ls+=bw[xx][bx]*bw[yy][by]*bw[zz][bz] * lattice_piece[bx * 9 + by * 3 + bz];
				}
			}
		}
		// Shift into 0-based output coordinates before storing.
		idx+=1;
		idy+=1;
		idz+=1;
		lattice_upsample[idx * n2y*n2z + idy * n2z + idz] = ls;
	}
}
// Evaluates the four cubic uniform B-spline basis functions at the local
// parameter t (expected in [0, 1)), writing them into B[0..3].
__device__ __inline__ void cubicBspline(
	// Input
	const float t,
	// Output
	float * B){
	const float u = 1 - t;
	const float tt = t * t;
	const float ttt = tt * t;
	B[0] = u * u * u / 6;
	B[1] = (3 * ttt - 6 * tt + 4) / 6;
	B[2] = (-3 * ttt + 3 * tt + 3 * t + 1) / 6;
	B[3] = ttt / 6;
}
// This evaluates the Bspline on each pixel on a field. Uses shared memory to
// stage the 4x4x4 control-point neighborhood of a span, shared by all pixels
// in that span.
// Launch layout: blocks_per_span_{x,y,z} thread blocks cover each lattice
// span per axis; each thread evaluates one output pixel.
// Fixes: removed a dead, empty debug loop that ran after the shared-memory
// load, and renamed the flattened loader index (previously `int tx`, shadowed
// by the later `float tx` spline parameter) to `tid`.
__global__ void evaluate_bspline_kernel_3D(
	// Input
	const float * lattice,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	const unsigned int blocks_per_span_x, // How many block is there per span
	const unsigned int blocks_per_span_y,
	const unsigned int blocks_per_span_z,
	const float span_x, // How long each span is
	const float span_y,
	const float span_z,
	const unsigned int fx, // What is the output field size
	const unsigned int fy,
	const unsigned int fz,
	// Output
	float * fitted
	){
	// 4x4x4 control-point neighborhood of the current span.
	__shared__ float lattice_piece[64];
	// Start Spans index i,j,k. This is the index into the lattice's left top corner.
	int si = blockIdx.x / (int)blocks_per_span_x;
	int sj = blockIdx.y / (int)blocks_per_span_y;
	int sk = blockIdx.z / (int)blocks_per_span_z;
	// Which block it is within this span
	int bnx = blockIdx.x % (int)blocks_per_span_x;
	int bny = blockIdx.y % (int)blocks_per_span_y;
	int bnz = blockIdx.z % (int)blocks_per_span_z;
	// Flattened thread index within the block, used for the cooperative load.
	int tid = threadIdx.x * blockDim.y * blockDim.z + threadIdx.y * blockDim.z + threadIdx.z;
	int BLOCK_SIZE = blockDim.x * blockDim.y * blockDim.z;
	// Each thread loads lattice values into shared memory.
	for (int h=tid; h<64; h+=BLOCK_SIZE) {
		// Need to translate the h into span index.
		// So threadidx -> linearize -> for loop -> delinearize -> lattice index
		int skk = sk + h % 4;
		int sjj = sj + (h/4) % 4;
		int sii = si + h/16;
		lattice_piece[h] = lattice[ sii * ncpt_y * ncpt_z + sjj * ncpt_z + skk ];
	}
	__syncthreads();
	// Determine if the current thread correspond to an actual pixel.
	// Example of starting pixel vs the Bspline points,
	// If this is 72.3, then the right closest center point is 72.5 then pixel index is 72.
	// If this is 72.6, then the right closest center point is 73.5 then pixel index is 73.
	// So it ends up being rounding around 0.5.
	// The start pixel that this span included
	int start_pixel_x = round(span_x * si);
	int start_pixel_y = round(span_y * sj);
	int start_pixel_z = round(span_z * sk);
	// The end pixel that this span included
	int end_pixel_x = round(span_x * (si+1))-1;
	int end_pixel_y = round(span_y * (sj+1))-1;
	int end_pixel_z = round(span_z * (sk+1))-1;
	// Pixel index
	int i = start_pixel_x + bnx* blockDim.x + threadIdx.x;
	int j = start_pixel_y + bny* blockDim.y + threadIdx.y;
	int k = start_pixel_z + bnz* blockDim.z + threadIdx.z;
	// If this thread actually correspond to an actual pixel, calculate the fitting value.
	if ((i <= end_pixel_x) && (j <= end_pixel_y) && (k <= end_pixel_z) &&
		(i < fx) && (j < fy) && (k < fz)){
		// The normalized local coordinates t for calculating bspline coefficients
		int bx = si;
		int by = sj;
		int bz = sk;
		float tx = ((i+0.5) - bx * span_x)/span_x;
		float ty = ((j+0.5) - by * span_y)/span_y;
		float tz = ((k+0.5) - bz * span_z)/span_z;
		// calculate B spline weight
		float wx[4];
		float wy[4];
		float wz[4];
		cubicBspline(tx, wx);
		cubicBspline(ty, wy);
		cubicBspline(tz, wz);
		// Accumulate the values
		float value = 0.;
		#pragma unroll
		for (int ix=0; ix<4; ix++){
			#pragma unroll
			for (int iy=0; iy<4; iy++){
				#pragma unroll
				for (int iz=0; iz<4; iz++){
					value+=wx[ix]* wy[iy]* wz[iz] * lattice_piece[ix * 16 + iy * 4 + iz];
				}
			}
		}
		fitted[i*fy*fz + j * fz + k] = value;
	}
}
// This function accumulate WC2_phi and WC2 for fitting.
// One accumulation pass of B-spline scattered-data approximation: for every
// foreground pixel, the 4x4x4 control points of its span receive
//   wc2_phic += wc^2 * phi_c, with phi_c = field * wc / sum(wc^2)
//   wc2      += wc^2
// where wc are the pixel's cubic B-spline weights. Launch layout matches
// evaluate_bspline_kernel_3D (blocks_per_span blocks per lattice span per
// axis). Results are accumulated with atomicAdd, so wc2_phic and wc2 are
// presumably zero-initialized by the caller — verify at call sites.
__global__ void accumulate_WC2phic_and_WC2(
	// Input
	const float * field,
	const float * mask,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	const unsigned int blocks_per_span_x, // How many block is there per span
	const unsigned int blocks_per_span_y,
	const unsigned int blocks_per_span_z,
	const float span_x, // How long each span is
	const float span_y,
	const float span_z,
	const unsigned int fx, // What is the input field size
	const unsigned int fy,
	const unsigned int fz,
	// Output
	float * wc2_phic,
	float * wc2
	){
	// Initialize the local wc2_phic and wc2 to 0
	__shared__ float wc2phic_local[64];
	__shared__ float wc2_local[64];
	// Start Spans index i,j,k. This is the index into the lattice's left top corner.
	int si = blockIdx.x / blocks_per_span_x;
	int sj = blockIdx.y / blocks_per_span_y;
	int sk = blockIdx.z / blocks_per_span_z;
	// Which block it is within this span
	int bnx = blockIdx.x % blocks_per_span_x;
	int bny = blockIdx.y % blocks_per_span_y;
	int bnz = blockIdx.z % blocks_per_span_z;
	// Flattened thread index within the block.
	int tx = threadIdx.x * blockDim.y * blockDim.z + threadIdx.y * blockDim.z + threadIdx.z;
	int BLOCK_SIZE = blockDim.x * blockDim.y * blockDim.z;
	// Each thread initializes the wc2phi and wc2
	for (int h=tx; h<64; h+=BLOCK_SIZE) {
		wc2phic_local[h] = 0.;
		wc2_local[h] = 0.;
	}
	__syncthreads();
	// Determine if the current thread correspond to an actual pixel.
	// Example of starting pixel vs the Bspline points,
	// If this is 72.3, then the right closest center point is 72.5 then pixel index is 72.
	// If this is 72.6, then the right closest center point is 73.5 then pixel index is 73.
	// So it ends up being rounding around 0.5.
	// The start pixel that this span included
	int start_pixel_x = round(span_x * si);
	int start_pixel_y = round(span_y * sj);
	int start_pixel_z = round(span_z * sk);
	// The end pixel that this span included
	int end_pixel_x = round(span_x * (si+1))-1;
	int end_pixel_y = round(span_y * (sj+1))-1;
	int end_pixel_z = round(span_z * (sk+1))-1;
	// Pixel index
	int i = start_pixel_x + bnx* blockDim.x + threadIdx.x;
	int j = start_pixel_y + bny* blockDim.y + threadIdx.y;
	int k = start_pixel_z + bnz* blockDim.z + threadIdx.z;
	// If this thread actually correspond to an actual pixel and the mask value is not zero, calculate the fitting value.
	// Note that the (mask[fidx]!=0) has to be put to the end to ensure the input fidx is a valid index due to short
	// circuit logic.
	int fidx= i*fy*fz + j*fz + k;
	if ((i <= end_pixel_x) && (j <= end_pixel_y) && (k <= end_pixel_z) &&
		(i < fx) && (j < fy) && (k < fz) && (mask[fidx]!=0) ){
		// The normalized local coordinates t for calculating bspline coefficients
		int bx = si;
		int by = sj;
		int bz = sk;
		float tx = ((i+0.5) - bx * span_x)/span_x;
		float ty = ((j+0.5) - by * span_y)/span_y;
		float tz = ((k+0.5) - bz * span_z)/span_z;
		// calculate B spline weight
		float wx[4];
		float wy[4];
		float wz[4];
		cubicBspline(tx, wx);
		cubicBspline(ty, wy);
		cubicBspline(tz, wz);
		// Calculate phi
		float wc_sum = 0.;
		float wc;
		float phi_c;
		// calculate wc_sum = sum over the 64 weights of wc^2
		#pragma unroll
		for (int ix=0; ix<4; ix++){
			#pragma unroll
			for (int iy=0; iy<4; iy++){
				#pragma unroll
				for (int iz=0; iz<4; iz++){
					wc = wx[ix]* wy[iy]* wz[iz];
					wc_sum += wc*wc;
				}
			}
		}
		// calculate wc2_phi and wc2
		float fv = field[fidx]; // field value
		#pragma unroll
		for (int ix=0; ix<4; ix++){
			#pragma unroll
			for (int iy=0; iy<4; iy++){
				#pragma unroll
				for (int iz=0; iz<4; iz++){
					wc = wx[ix]* wy[iy]* wz[iz];
					phi_c = fv * wc / wc_sum;
					// Locally accumulate wc2phic and wc2. This eliminates the need for
					// each thread directly accumulate into global memory.
					atomicAdd(&wc2phic_local[ix*16 + iy*4 + iz], wc*wc*phi_c);
					atomicAdd(&wc2_local[ix*16 + iy*4 + iz], wc*wc);
				}
			}
		}
	}
	__syncthreads();
	// Each thread accumulates the local wc2phi and wc2 into the global memory.
	for (int h=tx; h<64; h+=BLOCK_SIZE) {
		// Need to translate the h into span index.
		// So threadidx -> linearize -> for loop -> delinearize -> lattice index
		int skk = sk + h % 4;
		int sjj = sj + (h/4) % 4;
		int sii = si + h/16;
		atomicAdd(&wc2_phic[sii * ncpt_y * ncpt_z + sjj * ncpt_z + skk], wc2phic_local[h]);
		atomicAdd(&wc2[sii * ncpt_y * ncpt_z + sjj * ncpt_z + skk], wc2_local[h]);
	}
}
// Thresholding: any voxel whose intensity is <= low_value is cleared in both
// the image and the mask, excluding it from all further N4 processing.
__global__ void lowthreshold(
	const float low_value,
	const unsigned int numberOfPixel,
	float * im,
	float * mask){
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= numberOfPixel)
		return;
	if (im[idx] <= low_value){
		im[idx] = 0;
		mask[idx] = 0;
	}
}
// The function that takes in the sharpening kernel and sharpen the image.
// N4 histogram-sharpening step:
//   1. build a fractional-count histogram of the masked log-intensities (GPU),
//   2. Wiener-deconvolve it with a Gaussian of width
//      BiasFieldFullWidthAtHalfMaximum (host FFTs via the pre-built FFTW plans;
//      the plans are presumably bound to the h_* buffers at plan-creation time —
//      the code writes h_V and then executes pf_v, reading the result in h_Vf),
//   3. smooth numerator/denominator and form the lookup table h_E,
//   4. remap every masked pixel through h_E (GPU).
void sharpenImage(
	// Input
	float * d_before_sharpen, // image before sharpening. Not const because the min/max reduction temporarily writes sentinel background values into it.
	const float * d_mask, // the input mask
	const unsigned int numberOfPixels, // number of pixel within the image
	const unsigned int NumberOfHistogramBins, // number of histogram bin
	const unsigned int paddedHistogramSize, // number of histogram bin after padded
	const unsigned int histogramOffset, // histogram offset
	const float WienerFilterNoise, // when building the wiener filter, the tiny constant at the denominator to prevent divide by 0
	const float BiasFieldFullWidthAtHalfMaximum, // gaussian filter width
	float * h_V, // real values
	float * h_F,
	float * h_U,
	float * h_numerator,
	float * h_denominator,
	float * h_E,
	std::vector<std::complex<float>> & h_Vf, // complex values
	std::vector<std::complex<float>> & h_Ff,
	std::vector<std::complex<float>> & h_Uf,
	std::vector<std::complex<float>> & h_numeratorf,
	std::vector<std::complex<float>> & h_denominatorf,
	fftwf_plan & pf_v, // FFTW plans
	fftwf_plan & pf_f,
	fftwf_plan & pf_numerator,
	fftwf_plan & pf_denominator,
	fftwf_plan & pb_u,
	fftwf_plan & pb_numerator,
	fftwf_plan & pb_denominator,
	// Output
	float * d_after_sharpen // Image after sharpening
	){
	// Define variables
	float binMaximum, binMinimum;
	dim3 threads_1d(1024, 1, 1);
	dim3 blocks_1d( (numberOfPixels+1023)/1024, 1, 1);
	// buffer needed for the min/max operation
	float * d_buffer;
	float * d_histogram;
	float * d_E;
	checkCudaErrors(cudaMalloc((void **)&d_buffer, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_histogram, NumberOfHistogramBins*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_E, NumberOfHistogramBins*sizeof(float)));
	checkCudaErrors(cudaMemset(d_histogram, 0, NumberOfHistogramBins*sizeof(float))); // Init histogram to 0
	// Within the range defined by the mask, get the min/max of the before_sharpen image.
	// Background voxels are set to a large/small sentinel first so they never win
	// the min/max reduction, then reset to 0 afterwards.
	set_background_kernel<<< blocks_1d, threads_1d >>>(d_mask, 10000000000000.0, numberOfPixels, d_before_sharpen);
	binMinimum = Reducer::reduce_min_wrapper(numberOfPixels, d_before_sharpen, d_buffer);
	set_background_kernel<<< blocks_1d, threads_1d >>>(d_mask, -10000000000000.0, numberOfPixels, d_before_sharpen);
	binMaximum = Reducer::reduce_max_wrapper(numberOfPixels, d_before_sharpen, d_buffer);
	set_background_kernel<<< blocks_1d, threads_1d >>>(d_mask, 0.0, numberOfPixels, d_before_sharpen);
	// Calculate how large is each bin
	float histogramSlope = ( binMaximum - binMinimum )/( (float)NumberOfHistogramBins - 1. );
	// Create the intensity profile (within the masked region, if applicable)
	// using a triangular parzen windowing scheme (a histogram with fractional counts).
	histogramGPU_kernel<<< blocks_1d, threads_1d, NumberOfHistogramBins*sizeof(float) >>>(
		// Input
		d_before_sharpen,
		d_mask,
		binMinimum,
		histogramSlope,
		numberOfPixels,
		NumberOfHistogramBins,
		// Output
		d_histogram
	);
	// Copy the histogram into the padded host buffer at the offset (the padding
	// avoids FFT wrap-around effects).
	checkCudaErrors(cudaMemcpy(&(h_V[histogramOffset]), d_histogram, NumberOfHistogramBins*sizeof(float), cudaMemcpyDeviceToHost));
	// confirmed against the python example that the histogram is correct at this point.
	// Calculate the fft on the histogram, h_V -> h_Vf
	fftwf_execute(pf_v);
	// create a equal-size-to-histogram gaussian filter, fft it.
	// Since the histogram size here is small (for a 200 bin at most 512 is needed. Use the CPU
	// implementation of the fftw instead of cuFFT).
	// Create the Gaussian filter.
	float scaledFWHM = BiasFieldFullWidthAtHalfMaximum / histogramSlope;
	float expFactor = 4.0 * log( 2.0 ) / (scaledFWHM * scaledFWHM);
	float scaleFactor = 2.0 * sqrt( log( 2.0 )/ M_PI ) / scaledFWHM;
	// These parameters matches the python implementation
	// printf("GPU: Histogram slope/scaledFWHM/expFactor/scaleFactor: (%f, %f, %f, %f)\n", histogramSlope, scaledFWHM, expFactor, scaleFactor);
	h_F[0] = scaleFactor;
	unsigned int halfSize = (unsigned int)(0.5 * paddedHistogramSize);
	// The Gaussian is stored wrapped (periodic), so fill both ends symmetrically.
	for( unsigned int i = 1; i <= halfSize; i++ ){
		h_F[i] = h_F[paddedHistogramSize - i] =
			scaleFactor * exp( -expFactor*i*i );
	}
	if( paddedHistogramSize % 2 == 0 ){
		h_F[halfSize] = scaleFactor * exp(
			-0.25 * paddedHistogramSize*paddedHistogramSize*expFactor );
	}
	// FFT the gaussian kernel, h_F -> h_Ff
	fftwf_execute(pf_f);
	// change the Ff to Gf, multiply that with Vf and output to Uf
	for( unsigned int i = 0; i < (paddedHistogramSize/2+1); i++ ){
		// Make the Wiener deconvolution kernel and multiply with the signal.
		std::complex<float> c = conj(h_Ff[i]);
		std::complex<float> Gf = c / ( c * h_Ff[i] + WienerFilterNoise );
		h_Uf[i] = h_Vf[i] * Gf.real() / (float)paddedHistogramSize ;
	}
	// iFFT the deconvolved histogram and set clip negative real value to 0
	// h_Uf -> h_U. Note that this compared to the python implementation does not
	// do the normalization. So here need to divide the paddedHistogram to do the
	// normalization.
	fftwf_execute(pb_u);
	for( unsigned int i = 0; i < paddedHistogramSize; i++ ){
		h_U[i] = max( h_U[i], 0.0 );
	}
	// The numerator is histBin * U, where U = deconv(V)
	for( unsigned int i = 0; i < paddedHistogramSize; i++ ){
		h_numerator[i] = ( (float)binMinimum + ((float)i - histogramOffset) * histogramSlope ) * h_U[i];
	}
	// This is simply using the gaussian kernel h_Ff to smooth out the numerator.
	// smooth(hisBin * h_U)
	fftwf_execute(pf_numerator);
	for( unsigned int i = 0; i < (paddedHistogramSize/2+1); i++){
		h_numeratorf[i] *= h_Ff[i];
	}
	fftwf_execute(pb_numerator);
	// h_U -> h_denominatorf. This use directly h_U as input.
	// smooth(h_U)
	// Again this simply smooth the denominator with the gaussian kernel h_Ff.
	fftwf_execute(pf_denominator);
	for( unsigned int i = 0; i < (paddedHistogramSize/2+1); i++ ){
		h_denominatorf[i]*= h_Ff[i];
	}
	fftwf_execute(pb_denominator); // h_denominatorf -> h_denominator
	// The divide part. smooth(hisBin * h_U)/smooth(h_U)
	// Build a map of image from old histogram to new histogram
	// This skip the amount of histogramOffset.
	for( unsigned int i = 0; i < NumberOfHistogramBins; i++ ){
		if( h_denominator[i+histogramOffset] != 0.0 ){
			h_E[i] = h_numerator[i+histogramOffset] / h_denominator[i+histogramOffset];
		} else {
			h_E[i] = 0.0;
		}
	}
	// Map the pixel value using the map.
	checkCudaErrors(cudaMemcpy(d_E, h_E, NumberOfHistogramBins*sizeof(float), cudaMemcpyHostToDevice));
	histogramMapping_kernel<<< blocks_1d, threads_1d >>>(
		// Input
		d_before_sharpen,
		d_mask,
		d_E,
		binMinimum,
		histogramSlope,
		numberOfPixels, // num pixel in image
		NumberOfHistogramBins,
		// Output
		d_after_sharpen // Image after sharpening
	);
	// Clean up
	checkCudaErrors(cudaFree(d_buffer));
	checkCudaErrors(cudaFree(d_histogram));
	checkCudaErrors(cudaFree(d_E));
}
// This function serves to test the log image and sharpenImage.
// Host driver: uploads image and mask, applies lowthreshold and log_kernel,
// runs sharpenImage, and returns both the log image and the sharpened image
// so they can be compared against a reference implementation.
// NOTE(review): binMin_out / binMax_out are declared but never assigned in
// this function — confirm whether callers rely on them.
void testsharpenImage(
	// Input
	const float * h_before_sharpen, // image before sharpening (host copy; the device copy is what gets mutated by the min/max reduction)
	const float * h_mask,
	const unsigned int numberOfPixels,
	N4Param & param,
	N4Data & data,
	// Output
	float * h_before_sharpen_log,
	float * h_after_sharpen,
	float & binMin_out,
	float & binMax_out){
	// Device-side working copies.
	float * d_before_sharpen;
	float * d_before_sharpen_log;
	float * d_mask;
	float * d_after_sharpen;
	checkCudaErrors(cudaMalloc((void **)&d_before_sharpen, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_mask, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_before_sharpen_log, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_after_sharpen, numberOfPixels*sizeof(float)));
	// Copy the image and mask to gpu
	checkCudaErrors(cudaMemcpy(d_before_sharpen, h_before_sharpen, numberOfPixels*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_mask, h_mask, numberOfPixels*sizeof(float), cudaMemcpyHostToDevice));
	dim3 threads_1d(512, 1, 1);
	dim3 blocks_1d((numberOfPixels+511)/512, 1, 1);
	// Set the mask = 0 and im = 0 at the point where im<low_value
	lowthreshold<<< blocks_1d, threads_1d >>>(
		param.low_value,
		numberOfPixels,
		d_before_sharpen,
		d_mask);
	// Log-transform the foreground pixels.
	log_kernel<<< blocks_1d, threads_1d >>>(
		// Input
		d_before_sharpen,
		d_mask,
		numberOfPixels,
		// Output
		d_before_sharpen_log);
	// Output the log image for checking
	checkCudaErrors(cudaMemcpy(h_before_sharpen_log, d_before_sharpen_log, numberOfPixels*sizeof(float), cudaMemcpyDeviceToHost));
	// Run the function
	sharpenImage(
		// Input
		d_before_sharpen_log, // image before sharpening. Does not set const here because the min/max reduction has to set the background value.
		d_mask, // the input mask
		numberOfPixels, // number of pixel within the image
		param.NumberOfHistogramBins, // number of histogram bin
		param.paddedHistogramSize, // number of histogram bin after padded
		param.histogramOffset, // histogram offset
		param.WienerFilterNoise, // when building the wiener filter, the tiny constant at the denominator to prevent divide by 0
		param.BiasFieldFullWidthAtHalfMaximum, // gaussian filter width
		data.h_V, // real values
		data.h_F,
		data.h_U,
		data.h_numerator,
		data.h_denominator,
		data.h_E,
		data.h_Vf, // complex values
		data.h_Ff,
		data.h_Uf,
		data.h_numeratorf,
		data.h_denominatorf,
		data.pf_v, // FFTW plans
		data.pf_f,
		data.pf_numerator,
		data.pf_denominator,
		data.pb_u,
		data.pb_numerator,
		data.pb_denominator,
		// Output
		d_after_sharpen // Image after sharpening
	);
	checkCudaErrors(cudaMemcpy(h_after_sharpen, d_after_sharpen, numberOfPixels*sizeof(float), cudaMemcpyDeviceToHost));
	// Clean up
	checkCudaErrors(cudaFree(d_before_sharpen));
	checkCudaErrors(cudaFree(d_before_sharpen_log));
	checkCudaErrors(cudaFree(d_mask));
	checkCudaErrors(cudaFree(d_after_sharpen));
}
// Computes the N4 convergence measure between two log bias fields: the
// coefficient of variation (sigma/mu) of the ratio field exp(field1 - field2)
// taken over the foreground voxels (mask != 0).
void calculateConvergenceMeasurement(
	// Input
	const float * d_field1,
	const float * d_field2,
	const float * d_mask,
	const unsigned int numberOfPixels,
	const float numberOfForeground,
	// output
	float & convergence){
	dim3 threads_1d(512, 1, 1);
	dim3 blocks_1d( (numberOfPixels+511)/512, 1, 1);
	// Scratch buffers: the element-wise ratio field and reduction workspace.
	float * d_ratio;
	float * d_scratch;
	checkCudaErrors(cudaMalloc((void **)&d_ratio, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_scratch, numberOfPixels*sizeof(float)));
	// ratio[i] = exp(field1[i] - field2[i])
	subtract_and_exp_kernel<<< blocks_1d, threads_1d >>>(
		d_field1,
		d_field2,
		numberOfPixels,
		d_ratio);
	// Foreground mean: zero out background voxels, then reduce-sum.
	set_background_kernel<<< blocks_1d, threads_1d >>>(d_mask, 0, numberOfPixels, d_ratio);
	float mu = Reducer::reduce_sum_wrapper(numberOfPixels, d_ratio, d_scratch);
	mu /= numberOfForeground;
	// ratio[i] = (ratio[i] - mu)^2
	subtract_mean_and_sqr_kernel<<< blocks_1d, threads_1d >>>(
		d_ratio,
		mu,
		numberOfPixels);
	// Sample standard deviation over the foreground (divide by N-1, sqrt).
	set_background_kernel<<< blocks_1d, threads_1d >>>(d_mask, 0, numberOfPixels, d_ratio);
	float sigma = Reducer::reduce_sum_wrapper(numberOfPixels, d_ratio, d_scratch);
	sigma /= (numberOfForeground-1);
	sigma = sqrt(sigma);
	// Coefficient of variation is the convergence measure.
	convergence = sigma/mu;
	checkCudaErrors(cudaFree(d_ratio));
	checkCudaErrors(cudaFree(d_scratch));
}
// This function does the upsampling of the lattice on the CPU.
// The input lattice has (nx+3, ny+3, nz+3) control points and the output has
// (2*nx+3, 2*ny+3, 2*nz+3): as suggested by the B-spline refinement paper,
// a lattice of size n+3 is always refined to 2n+3.
//
// Bug fix: the span counts nx/ny/nz are now signed ints. They were previously
// unsigned, so loop conditions such as `i < nx+2` with i starting at -1
// promoted i to unsigned (usual arithmetic conversions) and the loops never
// executed, leaving the output buffer untouched.
void upsampleLattice3D_cpu(
	// Input
	const float * h_lattice,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	// Output
	float * h_lattice_upsample){
	// Refinement weights: row 0 produces the even fine points, row 1 the odd ones.
	const float bw[2][3] = {
		{1.f/8, 6.f/8, 1.f/8},
		{0.f, 1.f/2, 1.f/2}
	};
	// Span counts, kept signed so comparisons against -1 behave correctly.
	const int nx = (int)ncpt_x - 3;
	const int ny = (int)ncpt_y - 3;
	const int nz = (int)ncpt_z - 3;
	// Size of the upsampled lattice (only the y/z strides are needed for indexing).
	const int n2y = 2*ny + 3;
	const int n2z = 2*nz + 3;
	float lattice_piece[27];
	// Loop through each point within the low resolution lattice.
	for (int i=-1; i<nx+2; i++){
		for (int j=-1; j<ny+2; j++){
			for (int k=-1; k<nz+2; k++){
				// Each coarse point takes care of (up to) 2x2x2 fine points,
				// each a linear combination of the coarse 3x3x3 neighborhood.
				// Gather the neighborhood, clamping indices to the lattice
				// boundary (constant extension).
				for (int il = i-1; il<i+2; il++){
					for (int jl = j-1; jl<j+2; jl++){
						for (int kl = k-1; kl<k+2; kl++){
							// Local neighborhood coordinates, 0..2.
							int ilp = il-i+1;
							int jlp = jl-j+1;
							int klp = kl-k+1;
							// Clamp into the valid lattice index range.
							int ilc = std::min(std::max(il, -1), nx+1) + 1;
							int jlc = std::min(std::max(jl, -1), ny+1) + 1;
							int klc = std::min(std::max(kl, -1), nz+1) + 1;
							lattice_piece[ilp*9 + jlp*3 + klp] =
								h_lattice[ ilc * ncpt_y * ncpt_z + jlc * ncpt_z + klc ];
						}
					}
				}
				// Produce the 2x2x2 fine points of this coarse point.
				for (int i2=0; i2<2; i2++){
					for (int j2=0; j2<2; j2++){
						for (int k2=0; k2<2; k2++){
							int i2l = 2*i + i2;
							int j2l = 2*j + j2;
							int k2l = 2*k + k2;
							// Skip fine points outside the output lattice.
							if ((i2l>=-1) && (i2l<=2*nx+1) &&
								(j2l>=-1) && (j2l<=2*ny+1) &&
								(k2l>=-1) && (k2l<=2*nz+1)){
								// Each fine point is a weighted sum of 27 coarse values.
								float ls = 0.f;
								for (int bx=0; bx<3; bx++){
									for (int by=0; by<3; by++){
										for (int bz=0; bz<3; bz++){
											ls += bw[i2][bx]*bw[j2][by]*bw[k2][bz] * lattice_piece[bx*9 + by*3 + bz];
										}
									}
								}
								// Shift into 0-based output coordinates and store.
								h_lattice_upsample[(i2l+1) * n2y * n2z +
								                   (j2l+1) * n2z +
								                   (k2l+1)] = ls;
							}
						}
					}
				}
			}
		}
	}
}
// Upsamples a Bspline control lattice on the GPU. The launch uses one
// 2x2x2-thread block per input control point; the refinement itself lives in
// upsample_lattice_kernel_3D.
void upsampleLattice3D_gpu(
	// Input
	const float * d_lattice,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	// Output
	float * d_lattice_upsample){
	// Base span counts (ncpt - 3) and the refined lattice extents (2n + 3).
	const unsigned int base_x = ncpt_x - 3;
	const unsigned int base_y = ncpt_y - 3;
	const unsigned int base_z = ncpt_z - 3;
	const unsigned int up_x = 2*base_x + 3;
	const unsigned int up_y = 2*base_y + 3;
	const unsigned int up_z = 2*base_z + 3;
	dim3 grid(ncpt_x, ncpt_y, ncpt_z);
	dim3 block(2, 2, 2);
	upsample_lattice_kernel_3D<<< grid, block >>>(
		// Input
		ncpt_x,  // number of control points
		ncpt_y,
		ncpt_z,
		base_x,  // ncpt - 3, the base number
		base_y,
		base_z,
		up_x,    // number of control points on next level
		up_y,
		up_z,
		d_lattice,
		// Output
		d_lattice_upsample
	);
}
// Host-side test driver for upsampleLattice3D_gpu: copies the lattice to the
// device, runs the GPU upsampling, and copies the refined lattice back.
void testUpsampleLattice3D_gpu(
	// Input
	const float * h_lattice,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	// Output
	float * h_lattice_upsample){
	// Control-point counts before and after refinement ((n-3)*2+3 per axis).
	const unsigned int ncpt_x_up = (ncpt_x - 3) * 2 + 3;
	const unsigned int ncpt_y_up = (ncpt_y - 3) * 2 + 3;
	const unsigned int ncpt_z_up = (ncpt_z - 3) * 2 + 3;
	const unsigned int countIn  = ncpt_x * ncpt_y * ncpt_z;
	const unsigned int countOut = ncpt_x_up * ncpt_y_up * ncpt_z_up;
	float * d_lattice = NULL;
	float * d_lattice_upsample = NULL;
	checkCudaErrors(cudaMalloc((void **)&d_lattice, countIn*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_lattice_upsample, countOut*sizeof(float)));
	checkCudaErrors(cudaMemcpy(d_lattice, h_lattice, countIn*sizeof(float), cudaMemcpyHostToDevice));
	upsampleLattice3D_gpu(d_lattice, ncpt_x, ncpt_y, ncpt_z, d_lattice_upsample);
	checkCudaErrors(cudaMemcpy(h_lattice_upsample, d_lattice_upsample, countOut*sizeof(float), cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaFree(d_lattice));
	checkCudaErrors(cudaFree(d_lattice_upsample));
}
// This function evaluates the Bspline described by d_lattice over the
// (fx, fy, fz) output field. To reduce the total amount of global reads it
// applies the same memory reading scheme as the fitting function: the grid is
// organized span-by-span.
void EvaluateBspline3D(
	// Input
	const float * d_lattice,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	const unsigned int fx, // size of the output field
	const unsigned int fy,
	const unsigned int fz,
	// Output
	float * d_fitted){
	// Spans per axis (ncpt - 3) and the pixel extent covered by one span.
	const unsigned int spansX = ncpt_x - 3;
	const unsigned int spansY = ncpt_y - 3;
	const unsigned int spansZ = ncpt_z - 3;
	const float spanLenX = (float)fx / spansX;
	const float spanLenY = (float)fy / spansY;
	const float spanLenZ = (float)fz / spansZ;
	// Threads per block: the (ceiled) span size, capped at 6 per axis.
	// NOTE: an 8x8x8 thread block on GTX 1080 with driver 418 made the kernel
	// silently fail to launch; a 6x6x6 block works, hence the cap of 6.
	const int tx = (int)ceil(fmin(spanLenX, 6.f));
	const int ty = (int)ceil(fmin(spanLenY, 6.f));
	const int tz = (int)ceil(fmin(spanLenZ, 6.f));
	// Blocks needed to tile one span, then the whole field.
	const int spanBlocksX = (int)ceil(spanLenX / (float)tx);
	const int spanBlocksY = (int)ceil(spanLenY / (float)ty);
	const int spanBlocksZ = (int)ceil(spanLenZ / (float)tz);
	dim3 block(tx, ty, tz);
	dim3 grid(spansX * spanBlocksX, spansY * spanBlocksY, spansZ * spanBlocksZ);
	evaluate_bspline_kernel_3D<<< grid, block >>>(
		// Input
		d_lattice,
		ncpt_x, // number of control points
		ncpt_y,
		ncpt_z,
		spanBlocksX, // how many blocks there are per span
		spanBlocksY,
		spanBlocksZ,
		spanLenX, // how long each span is, in pixels
		spanLenY,
		spanLenZ,
		fx, // output field size
		fy,
		fz,
		// Output
		d_fitted
	);
}
// Host-side test driver for EvaluateBspline3D: copies the lattice to the
// device, evaluates the spline over the (fx, fy, fz) field, and copies the
// result back into h_fitted.
void testEvaluateBspline3D(// Input
	const float * h_lattice,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	const unsigned int fx, // size of the output field
	const unsigned int fy,
	const unsigned int fz,
	// Output
	float * h_fitted){
	float * d_lattice;
	float * d_fitted;
	unsigned int numberOfLattice = ncpt_x * ncpt_y * ncpt_z;
	unsigned int numberOfPixels = fx * fy * fz;
	checkCudaErrors(cudaMalloc((void **)&d_lattice, numberOfLattice*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_fitted, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMemcpy(d_lattice, h_lattice, numberOfLattice*sizeof(float), cudaMemcpyHostToDevice));
	// BUGFIX: zero the full output field. This previously cleared
	// numberOfLattice floats instead of numberOfPixels, which under-cleared
	// the field when the lattice is smaller and could write past the end of
	// d_fitted when the lattice is larger than the field.
	checkCudaErrors(cudaMemset(d_fitted, 0, numberOfPixels*sizeof(float)));
	EvaluateBspline3D(
		// Input
		d_lattice,
		ncpt_x, // number of control points
		ncpt_y,
		ncpt_z,
		fx, // size of the output field
		fy,
		fz,
		// Output
		d_fitted);
	checkCudaErrors(cudaMemcpy(h_fitted, d_fitted, numberOfPixels*sizeof(float), cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaFree(d_lattice));
	checkCudaErrors(cudaFree(d_fitted));
}
// This, given a field and a mask, fits a Bspline to the field according to the
// basic algorithm from the paper "Scattered Data Interpolation with Multilevel
// B-Splines". The block/thread/shared memory layout is exactly the same as
// EvaluateBspline3D except reads/writes are reversed.
void FitBspline3D(
	// Input
	const float * d_field,
	const float * d_mask,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	const unsigned int fx, // size of the output field
	const unsigned int fy,
	const unsigned int fz,
	// Output
	float * d_lattice
	){
	// Per-control-point accumulators: the two sums produced by
	// accumulate_WC2phic_and_WC2, divided elementwise at the end.
	// BUGFIX: a third scratch buffer (d_buffer) used to be allocated, zeroed
	// only implicitly and freed without ever being used; it has been removed.
	unsigned int n_lattice = ncpt_x * ncpt_y * ncpt_z;
	float * d_wc2_phic;
	float * d_wc2;
	checkCudaErrors(cudaMalloc((void **)&d_wc2_phic, n_lattice*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_wc2, n_lattice*sizeof(float)));
	checkCudaErrors(cudaMemset(d_wc2_phic, 0, n_lattice*sizeof(float)));
	checkCudaErrors(cudaMemset(d_wc2, 0, n_lattice*sizeof(float)));
	// Same block/thread/shared memory layout as the EvaluateBspline3D.
	unsigned int numberOfSpansX = ncpt_x - 3;
	unsigned int numberOfSpansY = ncpt_y - 3;
	unsigned int numberOfSpansZ = ncpt_z - 3;
	float span_x = 1.0*fx / numberOfSpansX;
	float span_y = 1.0*fy / numberOfSpansY;
	float span_z = 1.0*fz / numberOfSpansZ;
	// If a span is smaller than the cap, use the span size for each block.
	// (An 8x8x8 thread block silently failed to launch on GTX 1080 with
	// driver 418; 6x6x6 works, hence the cap of 6.)
	int thread_x = ceil(fmin(span_x, 6.f));
	int thread_y = ceil(fmin(span_y, 6.f));
	int thread_z = ceil(fmin(span_z, 6.f));
	int blocks_per_span_x = ceil(span_x / thread_x);
	int blocks_per_span_y = ceil(span_y / thread_y);
	int blocks_per_span_z = ceil(span_z / thread_z);
	int num_block_x = numberOfSpansX * blocks_per_span_x;
	int num_block_y = numberOfSpansY * blocks_per_span_y;
	int num_block_z = numberOfSpansZ * blocks_per_span_z;
	dim3 threads_3d(thread_x, thread_y, thread_z);
	dim3 blocks_3d(num_block_x , num_block_y, num_block_z);
	// Accumulate wc2_phic and wc2 over the masked field.
	accumulate_WC2phic_and_WC2<<< blocks_3d, threads_3d >>>(
		// Input
		d_field,
		d_mask,
		ncpt_x, // number of control points
		ncpt_y,
		ncpt_z,
		blocks_per_span_x, // How many block is there per span
		blocks_per_span_y,
		blocks_per_span_z,
		span_x, // How long each span is
		span_y,
		span_z,
		fx, // What is the input field size
		fy,
		fz,
		// Output
		d_wc2_phic,
		d_wc2
	);
	// lattice = wc2_phic / wc2, elementwise over the control points.
	dim3 threads_1d(512, 1, 1);
	dim3 blocks_1d((n_lattice+511)/512, 1, 1);
	divide_kernel<<< blocks_1d, threads_1d >>>(
		// Input
		d_wc2_phic,
		d_wc2,
		n_lattice,
		// Output
		d_lattice);
	// clean up
	checkCudaErrors(cudaFree(d_wc2_phic));
	checkCudaErrors(cudaFree(d_wc2));
}
// Host-side test driver for FitBspline3D: moves the field and mask to the
// device, fits the Bspline lattice, and copies the lattice back to the host.
void testFitBspline3D(
	// Input
	const float * h_field,
	const float * h_mask,
	const unsigned int ncpt_x, // number of control points
	const unsigned int ncpt_y,
	const unsigned int ncpt_z,
	const unsigned int fx, // size of the output field
	const unsigned int fy,
	const unsigned int fz,
	// Output
	float * h_lattice
	){
	const unsigned int latticeCount = ncpt_x * ncpt_y * ncpt_z;
	const unsigned int pixelCount = fx * fy * fz;
	float * d_field = NULL;
	float * d_mask = NULL;
	float * d_lattice = NULL;
	checkCudaErrors(cudaMalloc((void **)&d_field, pixelCount*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_mask, pixelCount*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_lattice, latticeCount*sizeof(float)));
	checkCudaErrors(cudaMemcpy(d_field, h_field, pixelCount*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_mask, h_mask, pixelCount*sizeof(float), cudaMemcpyHostToDevice));
	FitBspline3D(d_field, d_mask, ncpt_x, ncpt_y, ncpt_z, fx, fy, fz, d_lattice);
	checkCudaErrors(cudaMemcpy(h_lattice, d_lattice, latticeCount*sizeof(float), cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaFree(d_field));
	checkCudaErrors(cudaFree(d_mask));
	checkCudaErrors(cudaFree(d_lattice));
}
// The overall N4 (N4ITK-style) bias-field correction driver: iteratively
// sharpens the log image, fits a Bspline lattice to the residual log bias
// field, and refines the lattice over NumberOfFittingLevels levels.
void N4(
	// Input
	float * d_im, // The input image
	float * d_mask, // The mask. Right now only binary mask are supported.
	const unsigned int fx, // What is the input field size
	const unsigned int fy,
	const unsigned int fz,
	N4Param & param,
	N4Data & data, // Some additional data structure needed to run the function,
	// mostly are fft stuff needed for fftw.
	// Output
	float * d_im_normalized, // The image after normalization
	float * d_lattice, // The resulting lattice representing the bias field.
	// Size of this should be equal to 2^(number of level-1) * (initial_point-3) + 3
	// Optional. Also output the bias field
	float * d_biasField
	){
	unsigned int numberOfPixels = fx * fy * fz;
	// Control-point counts for the current fitting level.
	unsigned int ncpt_x = param.NumberOfControlPoints_x; // number of control points
	unsigned int ncpt_y = param.NumberOfControlPoints_y;
	unsigned int ncpt_z = param.NumberOfControlPoints_z;
	unsigned int numberOfLattice = ncpt_x * ncpt_y * ncpt_z;
	unsigned int ncpt_x_n; // number of control points on next level
	unsigned int ncpt_y_n;
	unsigned int ncpt_z_n;
	unsigned int numberOfLattice_n;
	float CurrentConvergenceMeasurement;
	unsigned int elapsedIterations;
	dim3 threads_1d(512, 1, 1);
	dim3 blocks_1d((numberOfPixels+511)/512, 1, 1);
	// Set the mask = 0 and im = 0 at the point where im<low_value
	lowthreshold<<< blocks_1d, threads_1d >>>(
		param.low_value,
		numberOfPixels,
		d_im,
		d_mask);
	// NOTE: an unused host buffer (h_buffer) and unused spacing/size arrays
	// were removed here.
	float * d_im_log;              // log of the masked input image
	float * d_logUncorrectedImage; // log image with the current bias removed
	float * d_logSharpenedImage;   // log image after histogram sharpening
	float * d_residualBiasField;   // uncorrected - sharpened residual
	float * d_logBiasField;        // accumulated log bias field
	float * d_newLogBiasField;     // candidate log bias field of this iteration
	float * d_buffer;              // scratch space for the reductions
	float * d_temp;                // for pointer swaps
	float * d_lattice_c_residual; // current level lattice fitted to the residual
	float * d_lattice_c; // current level lattice accumulated
	float * d_lattice_n; // next level lattice
	// Assign space
	checkCudaErrors(cudaMalloc((void **)&d_im_log, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_logUncorrectedImage, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_logSharpenedImage, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_residualBiasField, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_logBiasField, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_newLogBiasField, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_buffer, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMemset(d_im_log, 0, numberOfPixels*sizeof(float)));
	checkCudaErrors(cudaMemset(d_logBiasField, 0, numberOfPixels*sizeof(float))); // set the logBiasField initially to 0.
	checkCudaErrors(cudaMemset(d_buffer, 0, numberOfPixels*sizeof(float))); // zero the reduction scratch buffer
	log_kernel<<< blocks_1d, threads_1d >>>(
		// Input
		d_im,
		d_mask,
		numberOfPixels,
		// Output
		d_im_log);
	float numberOfForeground = Reducer::reduce_sum_wrapper(numberOfPixels, d_mask, d_buffer);
	checkCudaErrors(cudaMemcpy(d_logUncorrectedImage, d_im_log, numberOfPixels*sizeof(float), cudaMemcpyDeviceToDevice));
	// assign lattice space for the first level
	checkCudaErrors(cudaMalloc((void **)&d_lattice_c, numberOfLattice*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_lattice_c_residual, numberOfLattice*sizeof(float)));
	checkCudaErrors(cudaMemset(d_lattice_c, 0, numberOfLattice*sizeof(float))); // Init the initial lattice to 0.
	for (int currentLevel = 0; currentLevel<param.NumberOfFittingLevels; currentLevel++){
		// Currently the fitting resolution of the 3 axes are tied together.
		// Start the level with a convergence value that always admits a first
		// iteration.
		CurrentConvergenceMeasurement = 10000000000.0;
		elapsedIterations = 0;
		dim3 threads_1d_lattice(512, 1, 1);
		dim3 blocks_1d_lattice((numberOfLattice+511)/512, 1, 1);
		while ((CurrentConvergenceMeasurement > param.ConvergenceThreshold) &&
			(elapsedIterations<param.MaximumNumberOfIterations)){
			printf("Level %d, iter %d\n", currentLevel, elapsedIterations);
			elapsedIterations++;
			// Sharpen the image
			sharpenImage(
				// Input
				d_logUncorrectedImage, // image before sharpening. Does not set const here because the min/max reduction has to set the background value.
				d_mask, // the input mask
				numberOfPixels, // number of pixel within the image
				param.NumberOfHistogramBins, // number of histogram bin
				param.paddedHistogramSize, // number of histogram bin after padded
				param.histogramOffset, // histogram offset
				param.WienerFilterNoise, // when building the wiener filter, the tiny constant at the denominator to prevent divide by 0
				param.BiasFieldFullWidthAtHalfMaximum, // gaussian filter width
				data.h_V, // real values
				data.h_F,
				data.h_U,
				data.h_numerator,
				data.h_denominator,
				data.h_E,
				data.h_Vf, // complex values
				data.h_Ff,
				data.h_Uf,
				data.h_numeratorf,
				data.h_denominatorf,
				data.pf_v, // FFTW plans
				data.pf_f,
				data.pf_numerator,
				data.pf_denominator,
				data.pb_u,
				data.pb_numerator,
				data.pb_denominator,
				// Output
				d_logSharpenedImage // Image after sharpening
			);
			// Benchmark code- Start
			cudaEvent_t start, stop;
			cudaEventCreate(&start);
			cudaEventCreate(&stop);
			cudaEventRecord(start); // Benchmark code
			subtract<<< blocks_1d, threads_1d >>>(
				// Input
				d_logUncorrectedImage,
				d_logSharpenedImage,
				numberOfPixels,
				// Output
				d_residualBiasField);
			// Fit a new Bspline lattice to the current residual field
			FitBspline3D(
				// Input
				d_residualBiasField,
				d_mask,
				ncpt_x, // number of control points
				ncpt_y,
				ncpt_z,
				fx, // size of the output field
				fy,
				fz,
				// Output
				d_lattice_c_residual
			);
			// Accumulate the residual lattice in overall lattice
			sum_inplace_kernel<<< blocks_1d_lattice, threads_1d_lattice >>>(
				// Input
				d_lattice_c, // output
				d_lattice_c_residual,
				numberOfLattice);
			// calculate the new bias field
			EvaluateBspline3D(
				// Input
				d_lattice_c,
				ncpt_x, // number of control points
				ncpt_y,
				ncpt_z,
				fx, // size of the output field
				fy,
				fz,
				// Output
				d_newLogBiasField);
			// Benchmark code - End
			cudaEventRecord(stop);
			cudaEventSynchronize(stop);
			float milliseconds = 0;
			cudaEventElapsedTime(&milliseconds, start, stop);
			printf("Smooth image takes %f ms\n.", milliseconds);
			// BUGFIX: destroy the per-iteration timing events; they were
			// previously leaked on every iteration.
			cudaEventDestroy(start);
			cudaEventDestroy(stop);
			// calculate convergence
			// BUGFIX: the measurement must be written into
			// CurrentConvergenceMeasurement. It was previously written into
			// param.ConvergenceThreshold, which clobbered the threshold and
			// left the loop blind to convergence (it always ran for
			// MaximumNumberOfIterations).
			calculateConvergenceMeasurement(
				// Input
				d_logBiasField,
				d_newLogBiasField,
				d_mask,
				numberOfPixels,
				numberOfForeground,
				// output
				CurrentConvergenceMeasurement);
			// Update the logBiasField. Use pointer swap here to save time on copying data.
			d_temp = d_logBiasField;
			d_logBiasField = d_newLogBiasField;
			d_newLogBiasField = d_temp;
			// Update logUncorrectedImage
			subtract<<< blocks_1d, threads_1d >>>(
				// Input
				d_im_log,
				d_logBiasField,
				numberOfPixels,
				// Output
				d_logUncorrectedImage);
		}
		// Upsample the lattice if not the last level
		if (currentLevel!=param.NumberOfFittingLevels-1){
			ncpt_x_n = (ncpt_x - 3) * 2 + 3; // number of control points. Since we don't know how many points the user input initially, we iterate like this.
			ncpt_y_n = (ncpt_y - 3) * 2 + 3;
			ncpt_z_n = (ncpt_z - 3) * 2 + 3;
			numberOfLattice_n = ncpt_x_n * ncpt_y_n * ncpt_z_n;
			// Assign memory for the next level lattice
			checkCudaErrors(cudaMalloc((void **)&d_lattice_n, numberOfLattice_n*sizeof(float)));
			// Upsample
			upsampleLattice3D_gpu(
				// Input
				d_lattice_c,
				ncpt_x, // number of control points
				ncpt_y,
				ncpt_z,
				// Output
				d_lattice_n);
			// Free up the current lattice and pointer swap to next
			checkCudaErrors(cudaFree(d_lattice_c));
			checkCudaErrors(cudaFree(d_lattice_c_residual));
			d_lattice_c = d_lattice_n;
			// lattice_residual for next level
			checkCudaErrors(cudaMalloc((void **)&d_lattice_c_residual, numberOfLattice_n*sizeof(float)));
			// Update number of control points to next level
			ncpt_x = ncpt_x_n;
			ncpt_y = ncpt_y_n;
			ncpt_z = ncpt_z_n;
			numberOfLattice = numberOfLattice_n;
		}
	}
	// Final normalization: evaluate the final bias field ...
	EvaluateBspline3D(
		// Input
		d_lattice_c,
		ncpt_x, // number of control points
		ncpt_y,
		ncpt_z,
		fx, // size of the output field
		fy,
		fz,
		// Output
		d_logBiasField);
	// ... and divide the input image by exp(logBiasField).
	exp_and_divide_kernel<<< blocks_1d, threads_1d >>>(
		// Input
		d_logBiasField,
		d_im,
		numberOfPixels,
		// output
		d_im_normalized);
	// Output the bias field (optional)
	if (d_biasField != NULL){
		exp_kernel<<< blocks_1d, threads_1d >>>(
			// Input
			d_logBiasField,
			numberOfPixels,
			// output
			d_biasField);
	}
	// Save the lattice to output
	checkCudaErrors(cudaMemcpy(d_lattice, d_lattice_c, numberOfLattice*sizeof(float), cudaMemcpyDeviceToDevice));
	// clean up
	checkCudaErrors(cudaFree(d_im_log));
	checkCudaErrors(cudaFree(d_logUncorrectedImage));
	checkCudaErrors(cudaFree(d_logSharpenedImage));
	checkCudaErrors(cudaFree(d_residualBiasField));
	checkCudaErrors(cudaFree(d_logBiasField));
	checkCudaErrors(cudaFree(d_newLogBiasField));
	checkCudaErrors(cudaFree(d_buffer));
	checkCudaErrors(cudaFree(d_lattice_c));
	checkCudaErrors(cudaFree(d_lattice_c_residual));
}
// Host-side driver for N4: moves the image and mask to the device, runs the
// bias-field correction, and copies the normalized image, the final-level
// lattice, and (optionally) the bias field back to the host.
void testN4(
	// Input
	float * h_im, // The input image
	float * h_mask, // The mask. Right now only binary mask are supported.
	const unsigned int fx, // What is the input field size
	const unsigned int fy,
	const unsigned int fz,
	N4Param & param,
	N4Data & data, // Some additional data structure needed to run the function,
	// mostly are fft stuff needed for fftw.
	// Output
	float * h_im_normalized, // The image after normalization
	float * h_lattice, // The resulting lattice representing the bias field.
	// Size of this should be equal to 2^(number of level-1) * (initial_point-3) + 3
	// Optional. Also output the bias field
	float * h_biasField
	){
	// N4 returns the finest-level lattice: each level refines (n-3)*2+3
	// control points per axis, so iterate that map NumberOfFittingLevels-1
	// times to size the host/device lattice buffers.
	unsigned int cx = param.NumberOfControlPoints_x;
	unsigned int cy = param.NumberOfControlPoints_y;
	unsigned int cz = param.NumberOfControlPoints_z;
	for (unsigned int i=0; i< param.NumberOfFittingLevels-1; i++){
		cx = (cx - 3) * 2 + 3;
		cy = (cy - 3) * 2 + 3;
		cz = (cz - 3) * 2 + 3;
	}
	const unsigned int latticeCount = cx * cy * cz;
	const unsigned int pixelCount = fx * fy * fz;
	printf("Test lattice size: %d\n", latticeCount);
	float * d_im = NULL;
	float * d_mask = NULL;
	float * d_im_normalized = NULL;
	float * d_lattice = NULL;
	float * d_biasField = NULL; // stays NULL when the caller skips the bias field
	checkCudaErrors(cudaMalloc((void **)&d_im, pixelCount*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_mask, pixelCount*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_im_normalized, pixelCount*sizeof(float)));
	checkCudaErrors(cudaMalloc((void **)&d_lattice, latticeCount*sizeof(float)));
	// Optional. Also output the bias field
	if (h_biasField != NULL){
		checkCudaErrors(cudaMalloc((void **)&d_biasField, pixelCount*sizeof(float)));
	}
	checkCudaErrors(cudaMemcpy(d_im, h_im, pixelCount*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_mask, h_mask, pixelCount*sizeof(float), cudaMemcpyHostToDevice));
	N4(d_im, d_mask, fx, fy, fz, param, data,
		d_im_normalized, d_lattice, d_biasField);
	checkCudaErrors(cudaMemcpy(h_im_normalized, d_im_normalized, pixelCount*sizeof(float), cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaMemcpy(h_lattice, d_lattice, latticeCount*sizeof(float), cudaMemcpyDeviceToHost));
	// Optional. Also output the bias field
	if (h_biasField != NULL){
		checkCudaErrors(cudaMemcpy(h_biasField, d_biasField, pixelCount*sizeof(float), cudaMemcpyDeviceToHost));
		checkCudaErrors(cudaFree(d_biasField));
	}
	checkCudaErrors(cudaFree(d_im));
	checkCudaErrors(cudaFree(d_mask));
	checkCudaErrors(cudaFree(d_im_normalized));
	checkCudaErrors(cudaFree(d_lattice));
}
|
a407ada7239c1d7e80b97935503a87096ae8c24b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <chrono>
#define CHECK_CUDA_ERR(cudaerr) \
{ \
auto err = cudaerr; \
if (err != hipSuccess) { \
printf("kernel launch failed with error \"%s\".\n",\
hipGetErrorString(err)); \
exit(1); \
} \
}
// Computes the escape iteration of c = (c_re, c_im) under z <- z^2 + c and
// writes the corresponding RGB triple into pixels[global_index*3 ..].
// colors[0..2] is the "never escaped" color; colors[3 + it*3 ..] is the
// palette entry for escape iteration `it`.
__device__ void color_pixel(
        char *colors, char *pixels,
        double c_re, double c_im,
        int global_index, int max_iter) {
    double re = 0, im = 0;
    int it = 0;
    // Iterate z <- z^2 + c until |z|^2 >= 4 (escaped) or max_iter is hit.
    while (it < max_iter && re * re + im * im < 4) {
        double re_next = re * re - im * im + c_re;
        im = 2 * re * im + c_im;
        re = re_next;
        ++it;
    }
    char *out = pixels + global_index * 3;
    const char *src = (it < max_iter) ? colors + 3 + it * 3 : colors;
    out[0] = src[0];
    out[1] = src[1];
    out[2] = src[2];
}
// One thread per pixel: maps the flat thread index to (x, y), centers and
// scales the coordinates (both axes divided by width, keeping pixels square),
// and colors the pixel. Launch with at least height*width threads.
__global__ void mandelbrot(char *colors, char* pixels, int height, int width, int max_iter) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= height * width)
        return; // grid may overshoot the pixel count
    int px = idx % width;
    int py = idx / width;
    double re = ((double)px - width / 2.0) * 4.0 / width;
    double im = ((double)py - height / 2.0) * 4.0 / width;
    color_pixel(colors, pixels, re, im, idx, max_iter);
}
// Fills the palette consumed by color_pixel.
// colors[0..2] is the color for points that never escape; for every escape
// iteration it in [0, max_iter) the palette entry lives at colors[3 + it*3].
// The caller must provide a buffer of at least max_iter * 3 + 3 bytes.
void fill_colors(char *colors, int max_iter) {
    colors[0] = 200;
    colors[1] = 200;
    colors[2] = 200;
    int shade = 1, speed1 = 0, speed2 = 10, speed3 = 0, j = 1;
    // BUGFIX: iterate over the whole palette (max_iter entries of 3 bytes).
    // The previous bound `i < max_iter` only filled the first max_iter/3
    // entries, so color_pixel read uninitialized palette bytes for larger
    // escape iterations.
    for (int i = 0; i < max_iter * 3; i += 3) {
        // Darken the palette every 50 entries by shrinking the modulus;
        // cap the shift so 256/shade can never reach 0 (divide-by-zero).
        if (j % 50 == 0 && shade < 256)
            shade <<= 1;
        int red = colors[0] + i * speed1 - j;
        int green = colors[1] + i * speed2;
        int blue = colors[2] + i * speed3 - j;
        if (red < 0) red = 0;
        if (green < 0) green = 0;
        if (blue < 0) blue = 0;
        colors[3 + i] = (red) % (256 / shade);
        colors[3 + i + 1] = (green) % (256 / shade);
        colors[3 + i + 2] = (blue) % (256 / shade);
        j += 1;
    }
}
// Renders a fixed-size Mandelbrot image on the GPU, prints kernel and
// read-back timings, and (when argv[1] is non-zero) writes the image as BMP.
int main(int argc, char **argv) {
    // Guard argv[1] before dereferencing it.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <write_to_file_flag>\n", argv[0]);
        return 1;
    }
    int write_to_file_flag = std::atoi(argv[1]);
    int x_pixels = 19968, y_pixels = 13730, max_iter = 150;
    int n_pixels = x_pixels * y_pixels;
    char *host_pixels, *device_pixels, *host_colors, *device_colors;
    size_t pixel_size = sizeof(char) * n_pixels * 3; // * 3 for RGB
    // Pinned host memory speeds up device<->host transfers.
    // BUGFIX: the pixel buffer used a plain malloc despite the comment
    // claiming pinned memory; allocate it pinned so the large read-back
    // benefits too.
    CHECK_CUDA_ERR(hipHostMalloc(&host_pixels, pixel_size));
    CHECK_CUDA_ERR(hipMalloc(&device_pixels, pixel_size));
    size_t color_size = sizeof(char) * (max_iter * 3 + 3);
    CHECK_CUDA_ERR(hipHostMalloc(&host_colors, color_size));
    CHECK_CUDA_ERR(hipMalloc(&device_colors, color_size));
    fill_colors(host_colors, max_iter);
    CHECK_CUDA_ERR(hipMemcpy(device_colors, host_colors, color_size, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipDeviceSynchronize());
    auto start = std::chrono::steady_clock::now();
    hipLaunchKernelGGL(( mandelbrot), dim3((32 + n_pixels) / 32), dim3(32), 0, 0, 
        /*colors=*/device_colors,
        /*pixels=*/device_pixels,
        /*height=*/y_pixels,
        /*width=*/x_pixels,
        /*max_iter*/max_iter);
    // Catch launch-configuration errors right away; execution errors surface
    // at the following synchronize.
    CHECK_CUDA_ERR(hipGetLastError());
    CHECK_CUDA_ERR(hipDeviceSynchronize());
    auto end = std::chrono::steady_clock::now();
    std::cout << "RUN "
        << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
        << std::endl;
    start = std::chrono::steady_clock::now();
    CHECK_CUDA_ERR(hipMemcpy(host_pixels, device_pixels, pixel_size, hipMemcpyDeviceToHost));
    CHECK_CUDA_ERR(hipDeviceSynchronize());
    end = std::chrono::steady_clock::now();
    std::cout << "READ "
        << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
        << std::endl;
    if (write_to_file_flag) {
        long long current_time = time(nullptr);
        std::ofstream image (std::to_string(current_time).append("-gpu.bmp"), std::ofstream::binary);
        // Hand-rolled 26-byte BMP (BITMAPCOREHEADER-style) header, 24bpp.
        // NOTE(review): the little-endian width bytes (0x4E00 = 19968) match
        // x_pixels, but the height bytes (0x45A2 = 17826) do not match
        // y_pixels (13730 = 0x35A2) — verify before relying on the output.
        image <<
            (uint8_t)0x42 <<
            (uint8_t)0x4D <<
            (uint8_t)0x7C <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x1A <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x0C <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 << // Image Width
            (uint8_t)0x4E << // Image Width
            (uint8_t)0xA2 << // Image Height
            (uint8_t)0x45 << // Image height
            (uint8_t)0x01 <<
            (uint8_t)0x00 <<
            (uint8_t)0x18 <<
            (uint8_t)0x00;
        for (int i = 0; i < n_pixels * 3; i++)
            image << host_pixels[i];
        // BUGFIX: the trailing padding was streamed as int (which writes the
        // text "00"); emit two literal zero bytes instead.
        image << (uint8_t)0x00 << (uint8_t)0x00;
    }
    CHECK_CUDA_ERR(hipHostFree(host_pixels));
    CHECK_CUDA_ERR(hipHostFree(host_colors));
    CHECK_CUDA_ERR(hipFree(device_pixels));
    CHECK_CUDA_ERR(hipFree(device_colors));
    return 0;
}
| a407ada7239c1d7e80b97935503a87096ae8c24b.cu | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <chrono>
#define CHECK_CUDA_ERR(cudaerr) \
{ \
auto err = cudaerr; \
if (err != cudaSuccess) { \
printf("kernel launch failed with error \"%s\".\n",\
cudaGetErrorString(err)); \
exit(1); \
} \
}
// Computes the escape iteration of c = (c_re, c_im) under z <- z^2 + c and
// writes the corresponding RGB triple into pixels[global_index*3 ..].
// colors[0..2] is the "never escaped" color; colors[3 + it*3 ..] is the
// palette entry for escape iteration `it`.
__device__ void color_pixel(
        char *colors, char *pixels,
        double c_re, double c_im,
        int global_index, int max_iter) {
    double re = 0, im = 0;
    int it = 0;
    // Iterate z <- z^2 + c until |z|^2 >= 4 (escaped) or max_iter is hit.
    while (it < max_iter && re * re + im * im < 4) {
        double re_next = re * re - im * im + c_re;
        im = 2 * re * im + c_im;
        re = re_next;
        ++it;
    }
    char *out = pixels + global_index * 3;
    const char *src = (it < max_iter) ? colors + 3 + it * 3 : colors;
    out[0] = src[0];
    out[1] = src[1];
    out[2] = src[2];
}
// One thread per pixel: maps the flat thread index to (x, y), centers and
// scales the coordinates (both axes divided by width, keeping pixels square),
// and colors the pixel. Launch with at least height*width threads.
__global__ void mandelbrot(char *colors, char* pixels, int height, int width, int max_iter) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= height * width)
        return; // grid may overshoot the pixel count
    int px = idx % width;
    int py = idx / width;
    double re = ((double)px - width / 2.0) * 4.0 / width;
    double im = ((double)py - height / 2.0) * 4.0 / width;
    color_pixel(colors, pixels, re, im, idx, max_iter);
}
// Fills the palette consumed by color_pixel.
// colors[0..2] is the color for points that never escape; for every escape
// iteration it in [0, max_iter) the palette entry lives at colors[3 + it*3].
// The caller must provide a buffer of at least max_iter * 3 + 3 bytes.
void fill_colors(char *colors, int max_iter) {
    colors[0] = 200;
    colors[1] = 200;
    colors[2] = 200;
    int shade = 1, speed1 = 0, speed2 = 10, speed3 = 0, j = 1;
    // BUGFIX: iterate over the whole palette (max_iter entries of 3 bytes).
    // The previous bound `i < max_iter` only filled the first max_iter/3
    // entries, so color_pixel read uninitialized palette bytes for larger
    // escape iterations.
    for (int i = 0; i < max_iter * 3; i += 3) {
        // Darken the palette every 50 entries by shrinking the modulus;
        // cap the shift so 256/shade can never reach 0 (divide-by-zero).
        if (j % 50 == 0 && shade < 256)
            shade <<= 1;
        int red = colors[0] + i * speed1 - j;
        int green = colors[1] + i * speed2;
        int blue = colors[2] + i * speed3 - j;
        if (red < 0) red = 0;
        if (green < 0) green = 0;
        if (blue < 0) blue = 0;
        colors[3 + i] = (red) % (256 / shade);
        colors[3 + i + 1] = (green) % (256 / shade);
        colors[3 + i + 2] = (blue) % (256 / shade);
        j += 1;
    }
}
// Renders a fixed-size Mandelbrot image on the GPU, prints kernel and
// read-back timings, and (when argv[1] is non-zero) writes the image as BMP.
int main(int argc, char **argv) {
    // Guard argv[1] before dereferencing it.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <write_to_file_flag>\n", argv[0]);
        return 1;
    }
    int write_to_file_flag = std::atoi(argv[1]);
    int x_pixels = 19968, y_pixels = 13730, max_iter = 150;
    int n_pixels = x_pixels * y_pixels;
    char *host_pixels, *device_pixels, *host_colors, *device_colors;
    size_t pixel_size = sizeof(char) * n_pixels * 3; // * 3 for RGB
    // Pinned host memory speeds up device<->host transfers.
    // BUGFIX: the pixel buffer used a plain malloc despite the comment
    // claiming pinned memory; allocate it pinned so the large read-back
    // benefits too.
    CHECK_CUDA_ERR(cudaMallocHost(&host_pixels, pixel_size));
    CHECK_CUDA_ERR(cudaMalloc(&device_pixels, pixel_size));
    size_t color_size = sizeof(char) * (max_iter * 3 + 3);
    CHECK_CUDA_ERR(cudaMallocHost(&host_colors, color_size));
    CHECK_CUDA_ERR(cudaMalloc(&device_colors, color_size));
    fill_colors(host_colors, max_iter);
    CHECK_CUDA_ERR(cudaMemcpy(device_colors, host_colors, color_size, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaDeviceSynchronize());
    auto start = std::chrono::steady_clock::now();
    mandelbrot<<<(32 + n_pixels) / 32, 32>>>(
        /*colors=*/device_colors,
        /*pixels=*/device_pixels,
        /*height=*/y_pixels,
        /*width=*/x_pixels,
        /*max_iter*/max_iter);
    // Catch launch-configuration errors right away; execution errors surface
    // at the following synchronize.
    CHECK_CUDA_ERR(cudaGetLastError());
    CHECK_CUDA_ERR(cudaDeviceSynchronize());
    auto end = std::chrono::steady_clock::now();
    std::cout << "RUN "
        << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
        << std::endl;
    start = std::chrono::steady_clock::now();
    CHECK_CUDA_ERR(cudaMemcpy(host_pixels, device_pixels, pixel_size, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERR(cudaDeviceSynchronize());
    end = std::chrono::steady_clock::now();
    std::cout << "READ "
        << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
        << std::endl;
    if (write_to_file_flag) {
        long long current_time = time(nullptr);
        std::ofstream image (std::to_string(current_time).append("-gpu.bmp"), std::ofstream::binary);
        // Hand-rolled 26-byte BMP (BITMAPCOREHEADER-style) header, 24bpp.
        // NOTE(review): the little-endian width bytes (0x4E00 = 19968) match
        // x_pixels, but the height bytes (0x45A2 = 17826) do not match
        // y_pixels (13730 = 0x35A2) — verify before relying on the output.
        image <<
            (uint8_t)0x42 <<
            (uint8_t)0x4D <<
            (uint8_t)0x7C <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x1A <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x0C <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 <<
            (uint8_t)0x00 << // Image Width
            (uint8_t)0x4E << // Image Width
            (uint8_t)0xA2 << // Image Height
            (uint8_t)0x45 << // Image height
            (uint8_t)0x01 <<
            (uint8_t)0x00 <<
            (uint8_t)0x18 <<
            (uint8_t)0x00;
        for (int i = 0; i < n_pixels * 3; i++)
            image << host_pixels[i];
        // BUGFIX: the trailing padding was streamed as int (which writes the
        // text "00"); emit two literal zero bytes instead.
        image << (uint8_t)0x00 << (uint8_t)0x00;
    }
    CHECK_CUDA_ERR(cudaFreeHost(host_pixels));
    CHECK_CUDA_ERR(cudaFreeHost(host_colors));
    CHECK_CUDA_ERR(cudaFree(device_pixels));
    CHECK_CUDA_ERR(cudaFree(device_colors));
    return 0;
}
|
25ca68af6ba81c4c522299d0d0edc808b01be744.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv_batched.cu normal z -> c, Tue Feb 9 16:05:39 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define PRECISION_c
#define NB 256 //NB is the 1st level blocking in recursive blocking, BLOCK_SIZE is the 2ed level, NB=256, BLOCK_SIZE=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ctrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaFloatComplex shared_data[];
//==============================================================================
// Batched out-of-place ctrsv kernel wrapper (no-transpose path): each
// z-slice of the grid owns one problem of the batch and delegates the
// actual solve to the templated device routine.
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_notrans_kernel_outplace_batched(
    int n,
    magmaFloatComplex **A_array, int lda,
    magmaFloatComplex **b_array, int incb,
    magmaFloatComplex **x_array)
{
    // One batch entry per grid z-slice.
    const int bid = blockIdx.z;
    ctrsv_notrans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(
        n, A_array[bid], lda, b_array[bid], incb, x_array[bid]);
}
//==============================================================================
// Batched out-of-place ctrsv kernel wrapper (transpose/conj-transpose path):
// each z-slice of the grid owns one problem of the batch and delegates to
// the templated device routine.
template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_trans_kernel_outplace_batched(
    int n,
    magmaFloatComplex **A_array, int lda,
    magmaFloatComplex **b_array, int incb,
    magmaFloatComplex **x_array)
{
    // One batch entry per grid z-slice.
    const int bid = blockIdx.z;
    ctrsv_trans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(
        n, A_array[bid], lda, b_array[bid], incb, x_array[bid]);
}
//==============================================================================
// Out-of-place batched ctrsv dispatcher: validates the arguments, then
// selects the kernel template instantiation matching (uplo, trans, diag)
// and the recursion flag, and launches it with one grid z-slice per batch
// entry.  NoTrans uses the _N blocking constants, Trans/ConjTrans the _T
// constants, exactly as in the original unrolled dispatch.
extern "C" void
magmablas_ctrsv_outofplace_batched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaFloatComplex ** A_array, magma_int_t lda,
    magmaFloatComplex **b_array, magma_int_t incb,
    magmaFloatComplex **x_array,
    magma_int_t batchCount, magma_queue_t queue,
    magma_int_t flag)
{
    /* Argument checks (error numbering follows the non-batched interface). */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower )
        info = -1;
    else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
        info = -2;
    else if ( diag != MagmaUnit && diag != MagmaNonUnit )
        info = -3;
    else if ( n < 0 )
        info = -5;
    else if ( lda < max(1,n) )
        info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return if possible.
    if ( n == 0 )
        return;

    dim3 threads( NUM_THREADS, 1, 1 );
    dim3 blocks( 1, 1, batchCount );   // one z-slice per batch entry
    size_t shmem = n * sizeof(magmaFloatComplex);

    // Launch helper: instantiates KERNEL for flag == 0 or flag != 0.
#define CTRSV_BATCHED_LAUNCH(KERNEL, BS, DX, DY, UPLO, TRANS, DIAG)                             \
    do {                                                                                        \
        if (flag == 0) {                                                                        \
            hipLaunchKernelGGL(( KERNEL< BS, DX, DY, MagmaBigTileSize, 0, UPLO, TRANS, DIAG >), \
                dim3(blocks), dim3(threads), shmem, queue->cuda_stream(),                       \
                n, A_array, lda, b_array, incb, x_array);                                       \
        }                                                                                       \
        else {                                                                                  \
            hipLaunchKernelGGL(( KERNEL< BS, DX, DY, MagmaBigTileSize, 1, UPLO, TRANS, DIAG >), \
                dim3(blocks), dim3(threads), shmem, queue->cuda_stream(),                       \
                n, A_array, lda, b_array, incb, x_array);                                       \
        }                                                                                       \
    } while (0)

    if (trans == MagmaNoTrans) {
        if (uplo == MagmaUpper) {
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaUpper, MagmaNoTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaUpper, MagmaNoTrans, MagmaUnit);
        }
        else {  // MagmaLower
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaLower, MagmaNoTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaLower, MagmaNoTrans, MagmaUnit);
        }
    }
    else if (trans == MagmaTrans) {
        if (uplo == MagmaUpper) {
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaTrans, MagmaUnit);
        }
        else {  // MagmaLower
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaTrans, MagmaUnit);
        }
    }
    else if (trans == MagmaConjTrans) {
        if (uplo == MagmaUpper) {
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaConjTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaConjTrans, MagmaUnit);
        }
        else {  // MagmaLower
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaConjTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaConjTrans, MagmaUnit);
        }
    }
#undef CTRSV_BATCHED_LAUNCH
}
//==============================================================================
// Recursive (blocked) out-of-place batched ctrsv driver.
//
// Walks the diagonal in NB-sized panels: for each panel it first folds the
// already-computed portion of x into the current segment with a batched gemv,
// then solves the jb x jb diagonal block via magmablas_ctrsv_outofplace_batched.
// b_array supplies the right-hand sides; the solution accumulates in x_array.
extern "C" void
magmablas_ctrsv_recursive_outofplace_batched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaFloatComplex **A_array, magma_int_t lda,
    magmaFloatComplex **b_array, magma_int_t incb,
    magmaFloatComplex **x_array,
    magma_int_t batchCount, magma_queue_t queue)
{
    /* Check arguments */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower ) {
        info = -1;
    } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
        info = -2;
    } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
        info = -3;
    } else if (n < 0) {
        info = -5;
    } else if (lda < max(1,n)) {
        info = -8;
    }
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return if possible.
    if (n == 0)
        return;
    //Init x_array with zero
    //magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
    //memory allocation takes 0.32ms
    // Per-batch displaced-pointer workspaces for the gemv/trsv sub-calls.
    magmaFloatComplex **dW0_displ = NULL;
    magmaFloatComplex **dW1_displ = NULL;
    magmaFloatComplex **dW2_displ = NULL;
    magma_int_t alloc = 0;
    alloc += magma_malloc((void**)&dW0_displ, batchCount * sizeof(*dW0_displ));
    alloc += magma_malloc((void**)&dW1_displ, batchCount * sizeof(*dW1_displ));
    alloc += magma_malloc((void**)&dW2_displ, batchCount * sizeof(*dW2_displ));
    if (alloc != 0)
    {
        magma_free( dW0_displ );
        magma_free( dW1_displ );
        magma_free( dW2_displ );
        // NOTE(review): dead store — info is a local and the function returns
        // void, so this allocation failure is never reported to the caller.
        info = MAGMA_ERR_DEVICE_ALLOC;
        return;
    }
    magma_int_t col = n;
    if (trans == MagmaNoTrans)
    {
        for (magma_int_t i=0; i < n; i+= NB)
        {
            magma_int_t jb = min(NB, n-i);  // current diagonal panel size
            if (uplo == MagmaUpper)
            {
                // Upper/no-trans sweeps upward from the bottom: the update
                // uses the trailing block row A(col, col+jb) against x(col+jb).
                col -= jb;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col+jb, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            else
            {
                // Lower/no-trans sweeps downward: the update uses the leading
                // block row A(col, 0) against x(0).
                col = i;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, col, 0, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            //assume x_array contains zero elements
            magmablas_cgemv_batched(MagmaNoTrans, jb, i, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
            // Solve the jb x jb diagonal block A(col,col) into x(col); i is
            // forwarded as the sub-solver's flag (zero only on the first panel).
            magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
            magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
            magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            magmablas_ctrsv_outofplace_batched(uplo, trans, diag,jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
        }
    }
    else
    {
        for (magma_int_t i=0; i < n; i += NB)
        {
            magma_int_t jb = min(NB, n-i);  // current diagonal panel size
            if (uplo == MagmaLower)
            {
                // Lower/(conj-)transposed sweeps upward: the update uses the
                // sub-diagonal block A(col+jb, col), applied transposed by gemv.
                col -= jb;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, col+jb, col, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            else
            {
                // Upper/(conj-)transposed sweeps downward: the update uses the
                // super-diagonal block A(0, col).
                col = i;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, 0, col, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            //assume x_array contains zero elements
            magmablas_cgemv_batched(trans, i, jb, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
            // Solve the jb x jb diagonal block A(col,col) into x(col).
            magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
            magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
            magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            magmablas_ctrsv_outofplace_batched(uplo, trans, diag, jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
        }
    }
    magma_free(dW0_displ);
    magma_free(dW1_displ);
    magma_free(dW2_displ);
}
//==============================================================================
// Batched ctrsv driver with caller-provided workspace: solves op(A)*x = b
// out-of-place into x_array, then copies the solution back over b_array.
extern "C" void
magmablas_ctrsv_work_batched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaFloatComplex **A_array, magma_int_t lda,
    magmaFloatComplex **b_array, magma_int_t incb,
    magmaFloatComplex **x_array,
    magma_int_t batchCount, magma_queue_t queue)
{
    //magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
    //magmablas_ctrsv_recursive_outofplace_batched
    // Solve into the x workspace ...
    magmablas_ctrsv_recursive_outofplace_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
    // ... then overwrite b with the solution (an n-by-incb copy per entry).
    magmablas_clacpy_batched( MagmaFull, n, incb, x_array, n, b_array, n, batchCount, queue);
}
//==============================================================================
/**
Purpose
-------
ctrsv solves one of the matrix equations on gpu
        op(A)*x = b,
    where x and b are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n N specifies the order of the matrix A. n >= 0.
@param[in]
A_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of dimension ( lda, n ),
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
lda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
    @param[in,out]
b_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_cblas2
********************************************************************/
//==============================================================================
// Batched ctrsv convenience wrapper: allocates the out-of-place workspace
// x (size_x = n*incb elements per batch entry) plus its device pointer
// array, solves op(A)*x = b via magmablas_ctrsv_work_batched (which writes
// the solution back into b_array), then releases the workspace.
extern "C" void
magmablas_ctrsv_batched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaFloatComplex **A_array, magma_int_t lda,
    magmaFloatComplex **b_array, magma_int_t incb,
    magma_int_t batchCount,
    magma_queue_t queue)
{
    // quick return if possible (also avoids zero-sized allocations below).
    if (n == 0)
        return;
    magma_int_t size_x = n * incb;   // workspace elements per batch entry
    magmaFloatComplex *x = NULL;
    magmaFloatComplex **x_array = NULL;
    // Check both allocations before use; previously a failed magma_cmalloc /
    // magma_malloc went undetected and NULL workspaces reached device code.
    magma_int_t alloc = 0;
    alloc += magma_cmalloc( &x, size_x * batchCount );
    alloc += magma_malloc( (void**)&x_array, batchCount * sizeof(*x_array) );
    if (alloc != 0) {
        // Release whichever allocation (if any) succeeded and bail out.
        magma_free( x );
        magma_free( x_array );
        return;
    }
    magma_cset_pointer( x_array, x, n, 0, 0, size_x, batchCount, queue );
    magmablas_ctrsv_work_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
    magma_free(x);
    magma_free(x_array);
}
| 25ca68af6ba81c4c522299d0d0edc808b01be744.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv_batched.cu normal z -> c, Tue Feb 9 16:05:39 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define PRECISION_c
#define NB 256 //NB is the 1st level blocking in recursive blocking, BLOCK_SIZE is the 2ed level, NB=256, BLOCK_SIZE=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ctrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaFloatComplex shared_data[];
//==============================================================================
// Batched out-of-place ctrsv kernel wrapper (no-transpose path): each
// z-slice of the grid owns one problem of the batch and delegates the
// actual solve to the templated device routine.
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_notrans_kernel_outplace_batched(
    int n,
    magmaFloatComplex **A_array, int lda,
    magmaFloatComplex **b_array, int incb,
    magmaFloatComplex **x_array)
{
    // One batch entry per grid z-slice.
    const int bid = blockIdx.z;
    ctrsv_notrans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(
        n, A_array[bid], lda, b_array[bid], incb, x_array[bid]);
}
//==============================================================================
// Batched out-of-place ctrsv kernel wrapper (transpose/conj-transpose path):
// each z-slice of the grid owns one problem of the batch and delegates to
// the templated device routine.
template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_trans_kernel_outplace_batched(
    int n,
    magmaFloatComplex **A_array, int lda,
    magmaFloatComplex **b_array, int incb,
    magmaFloatComplex **x_array)
{
    // One batch entry per grid z-slice.
    const int bid = blockIdx.z;
    ctrsv_trans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(
        n, A_array[bid], lda, b_array[bid], incb, x_array[bid]);
}
//==============================================================================
// Out-of-place batched ctrsv dispatcher: validates the arguments, then
// selects the kernel template instantiation matching (uplo, trans, diag)
// and the recursion flag, and launches it with one grid z-slice per batch
// entry.  NoTrans uses the _N blocking constants, Trans/ConjTrans the _T
// constants, exactly as in the original unrolled dispatch.
extern "C" void
magmablas_ctrsv_outofplace_batched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaFloatComplex ** A_array, magma_int_t lda,
    magmaFloatComplex **b_array, magma_int_t incb,
    magmaFloatComplex **x_array,
    magma_int_t batchCount, magma_queue_t queue,
    magma_int_t flag)
{
    /* Argument checks (error numbering follows the non-batched interface). */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower )
        info = -1;
    else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
        info = -2;
    else if ( diag != MagmaUnit && diag != MagmaNonUnit )
        info = -3;
    else if ( n < 0 )
        info = -5;
    else if ( lda < max(1,n) )
        info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return if possible.
    if ( n == 0 )
        return;

    dim3 threads( NUM_THREADS, 1, 1 );
    dim3 blocks( 1, 1, batchCount );   // one z-slice per batch entry
    size_t shmem = n * sizeof(magmaFloatComplex);

    // Launch helper: instantiates KERNEL for flag == 0 or flag != 0.
#define CTRSV_BATCHED_LAUNCH(KERNEL, BS, DX, DY, UPLO, TRANS, DIAG)           \
    do {                                                                      \
        if (flag == 0) {                                                      \
            KERNEL< BS, DX, DY, MagmaBigTileSize, 0, UPLO, TRANS, DIAG >      \
                <<< blocks, threads, shmem, queue->cuda_stream() >>>          \
                (n, A_array, lda, b_array, incb, x_array);                    \
        }                                                                     \
        else {                                                                \
            KERNEL< BS, DX, DY, MagmaBigTileSize, 1, UPLO, TRANS, DIAG >      \
                <<< blocks, threads, shmem, queue->cuda_stream() >>>          \
                (n, A_array, lda, b_array, incb, x_array);                    \
        }                                                                     \
    } while (0)

    if (trans == MagmaNoTrans) {
        if (uplo == MagmaUpper) {
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaUpper, MagmaNoTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaUpper, MagmaNoTrans, MagmaUnit);
        }
        else {  // MagmaLower
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaLower, MagmaNoTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_notrans_kernel_outplace_batched, BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaLower, MagmaNoTrans, MagmaUnit);
        }
    }
    else if (trans == MagmaTrans) {
        if (uplo == MagmaUpper) {
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaTrans, MagmaUnit);
        }
        else {  // MagmaLower
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaTrans, MagmaUnit);
        }
    }
    else if (trans == MagmaConjTrans) {
        if (uplo == MagmaUpper) {
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaConjTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaUpper, MagmaConjTrans, MagmaUnit);
        }
        else {  // MagmaLower
            if (diag == MagmaNonUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaConjTrans, MagmaNonUnit);
            else if (diag == MagmaUnit)
                CTRSV_BATCHED_LAUNCH(ctrsv_trans_kernel_outplace_batched, BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaLower, MagmaConjTrans, MagmaUnit);
        }
    }
#undef CTRSV_BATCHED_LAUNCH
}
//==============================================================================
// Recursive (blocked) out-of-place batched ctrsv driver.
//
// Walks the diagonal in NB-sized panels: for each panel it first folds the
// already-computed portion of x into the current segment with a batched gemv,
// then solves the jb x jb diagonal block via magmablas_ctrsv_outofplace_batched.
// b_array supplies the right-hand sides; the solution accumulates in x_array.
extern "C" void
magmablas_ctrsv_recursive_outofplace_batched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaFloatComplex **A_array, magma_int_t lda,
    magmaFloatComplex **b_array, magma_int_t incb,
    magmaFloatComplex **x_array,
    magma_int_t batchCount, magma_queue_t queue)
{
    /* Check arguments */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower ) {
        info = -1;
    } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
        info = -2;
    } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
        info = -3;
    } else if (n < 0) {
        info = -5;
    } else if (lda < max(1,n)) {
        info = -8;
    }
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return if possible.
    if (n == 0)
        return;
    //Init x_array with zero
    //magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
    //memory allocation takes 0.32ms
    // Per-batch displaced-pointer workspaces for the gemv/trsv sub-calls.
    magmaFloatComplex **dW0_displ = NULL;
    magmaFloatComplex **dW1_displ = NULL;
    magmaFloatComplex **dW2_displ = NULL;
    magma_int_t alloc = 0;
    alloc += magma_malloc((void**)&dW0_displ, batchCount * sizeof(*dW0_displ));
    alloc += magma_malloc((void**)&dW1_displ, batchCount * sizeof(*dW1_displ));
    alloc += magma_malloc((void**)&dW2_displ, batchCount * sizeof(*dW2_displ));
    if (alloc != 0)
    {
        magma_free( dW0_displ );
        magma_free( dW1_displ );
        magma_free( dW2_displ );
        // NOTE(review): dead store — info is a local and the function returns
        // void, so this allocation failure is never reported to the caller.
        info = MAGMA_ERR_DEVICE_ALLOC;
        return;
    }
    magma_int_t col = n;
    if (trans == MagmaNoTrans)
    {
        for (magma_int_t i=0; i < n; i+= NB)
        {
            magma_int_t jb = min(NB, n-i);  // current diagonal panel size
            if (uplo == MagmaUpper)
            {
                // Upper/no-trans sweeps upward from the bottom: the update
                // uses the trailing block row A(col, col+jb) against x(col+jb).
                col -= jb;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col+jb, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            else
            {
                // Lower/no-trans sweeps downward: the update uses the leading
                // block row A(col, 0) against x(0).
                col = i;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, col, 0, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            //assume x_array contains zero elements
            magmablas_cgemv_batched(MagmaNoTrans, jb, i, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
            // Solve the jb x jb diagonal block A(col,col) into x(col); i is
            // forwarded as the sub-solver's flag (zero only on the first panel).
            magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
            magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
            magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            magmablas_ctrsv_outofplace_batched(uplo, trans, diag,jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
        }
    }
    else
    {
        for (magma_int_t i=0; i < n; i += NB)
        {
            magma_int_t jb = min(NB, n-i);  // current diagonal panel size
            if (uplo == MagmaLower)
            {
                // Lower/(conj-)transposed sweeps upward: the update uses the
                // sub-diagonal block A(col+jb, col), applied transposed by gemv.
                col -= jb;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, col+jb, col, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            else
            {
                // Upper/(conj-)transposed sweeps downward: the update uses the
                // super-diagonal block A(0, col).
                col = i;
                magma_cdisplace_pointers(dW0_displ, A_array, lda, 0, col, batchCount, queue);
                magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
                magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            }
            //assume x_array contains zero elements
            magmablas_cgemv_batched(trans, i, jb, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
            // Solve the jb x jb diagonal block A(col,col) into x(col).
            magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
            magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
            magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
            magmablas_ctrsv_outofplace_batched(uplo, trans, diag, jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
        }
    }
    magma_free(dW0_displ);
    magma_free(dW1_displ);
    magma_free(dW2_displ);
}
//==============================================================================
// Batched ctrsv driver with caller-provided workspace: solves op(A)*x = b
// out-of-place into x_array, then copies the solution back over b_array.
extern "C" void
magmablas_ctrsv_work_batched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaFloatComplex **A_array, magma_int_t lda,
    magmaFloatComplex **b_array, magma_int_t incb,
    magmaFloatComplex **x_array,
    magma_int_t batchCount, magma_queue_t queue)
{
    //magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
    //magmablas_ctrsv_recursive_outofplace_batched
    // Solve into the x workspace ...
    magmablas_ctrsv_recursive_outofplace_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
    // ... then overwrite b with the solution (an n-by-incb copy per entry).
    magmablas_clacpy_batched( MagmaFull, n, incb, x_array, n, b_array, n, batchCount, queue);
}
//==============================================================================
/**
Purpose
-------
ctrsv solves one of the matrix equations on gpu
        op(A)*x = b,
    where x and b are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
A_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of dimension ( lda, n ),
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
lda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
b_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_cblas2
********************************************************************/
//==============================================================================
extern "C" void
magmablas_ctrsv_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex **A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magma_int_t batchCount,
magma_queue_t queue)
{
// Size of one per-problem solution workspace.
magma_int_t size_x = n * incb;
magmaFloatComplex *x=NULL;
magmaFloatComplex **x_array = NULL;
// One contiguous workspace for the whole batch, plus a device pointer array
// addressing it (batch stride size_x -- presumably x_array[i] = x + i*size_x;
// confirm against magma_cset_pointer's argument order).
// NOTE(review): the allocation results are not checked; a NULL x/x_array
// would be passed straight into the workers.
magma_cmalloc( &x, size_x * batchCount);
magma_malloc((void**)&x_array, batchCount * sizeof(*x_array));
magma_cset_pointer( x_array, x, n, 0, 0, size_x, batchCount, queue );
// Solve; on return b_array holds the solutions (see the _work variant).
magmablas_ctrsv_work_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
magma_free(x);
magma_free(x_array);
}
|
4e9a6b075bb9783f3d0f7332a6be9e8f3fae62df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Reduces per-element sin/cos phase terms into sum[0] (sin) and sum[1] (cos).
// Indexing uses only threadIdx.x (the blockIdx term is commented out), so the
// kernel must be launched with a single block covering all N elements.
// blockDim_2 is the thread count rounded up to the next power of two, which
// lets the tree reduction handle a non-power-of-two N.
// tmpsum stores 2 floats per element, so N must not exceed 500.
__global__ void kernel_array_beam_slave_sincos_original(int N, float r1, float r2, float r3, float *x, float *y, float *z, float *sum, int blockDim_2) {
unsigned int n=threadIdx.x; //+blockDim.x*blockIdx.x;
__shared__ float tmpsum[1000]; /* assumed to be size 2*Nx1 */
if (n<N) {
float ss,cc;
// Phase = r1*x[n] + r2*y[n] + r3*z[n]; __ldg routes the loads through the
// read-only data cache.
sincosf((r1*__ldg(&x[n])+r2*__ldg(&y[n])+r3*__ldg(&z[n])),&ss,&cc);
tmpsum[2*n]=ss;
tmpsum[2*n+1]=cc;
}
__syncthreads();
// Build summation tree over elements, handling case where total threads is not a power of two.
int nTotalThreads = blockDim_2; // Total number of threads (==N), rounded up to the next power of two
while(nTotalThreads > 1) {
int halfPoint = (nTotalThreads >> 1); // divide by two
if (n < halfPoint) {
int thread2 = n + halfPoint;
if (thread2 < blockDim.x) { // Skipping the fictitious threads >N ( blockDim.x ... blockDim_2-1 )
// NOTE(review): if blockDim.x > N, slots for threads in [N, blockDim.x)
// were never written above and are read here uninitialized -- confirm
// the caller launches with blockDim.x == N.
tmpsum[2*n] = tmpsum[2*n]+tmpsum[2*thread2];
tmpsum[2*n+1] = tmpsum[2*n+1]+tmpsum[2*thread2+1];
}
}
__syncthreads();
nTotalThreads = halfPoint; // Reducing the binary tree size by two
}
/* now thread 0 will add up results */
if (threadIdx.x==0) {
sum[0]=tmpsum[0];
sum[1]=tmpsum[1];
}
}
// Reduces per-element sin/cos phase terms into sum[0] (sin) and sum[1] (cos).
// Indexing uses only threadIdx.x (the blockIdx term is commented out), so the
// kernel must be launched with a single block covering all N elements.
// blockDim_2 is the thread count rounded up to the next power of two, which
// lets the tree reduction handle a non-power-of-two N.
// tmpsum stores 2 floats per element, so N must not exceed 500.
__global__ void kernel_array_beam_slave_sincos_original(int N, float r1, float r2, float r3, float *x, float *y, float *z, float *sum, int blockDim_2) {
unsigned int n=threadIdx.x; //+blockDim.x*blockIdx.x;
__shared__ float tmpsum[1000]; /* assumed to be size 2*Nx1 */
if (n<N) {
float ss,cc;
// Phase = r1*x[n] + r2*y[n] + r3*z[n]; __ldg routes the loads through the
// read-only data cache.
sincosf((r1*__ldg(&x[n])+r2*__ldg(&y[n])+r3*__ldg(&z[n])),&ss,&cc);
tmpsum[2*n]=ss;
tmpsum[2*n+1]=cc;
}
__syncthreads();
// Build summation tree over elements, handling case where total threads is not a power of two.
int nTotalThreads = blockDim_2; // Total number of threads (==N), rounded up to the next power of two
while(nTotalThreads > 1) {
int halfPoint = (nTotalThreads >> 1); // divide by two
if (n < halfPoint) {
int thread2 = n + halfPoint;
if (thread2 < blockDim.x) { // Skipping the fictitious threads >N ( blockDim.x ... blockDim_2-1 )
// NOTE(review): if blockDim.x > N, slots for threads in [N, blockDim.x)
// were never written above and are read here uninitialized -- confirm
// the caller launches with blockDim.x == N.
tmpsum[2*n] = tmpsum[2*n]+tmpsum[2*thread2];
tmpsum[2*n+1] = tmpsum[2*n+1]+tmpsum[2*thread2+1];
}
}
__syncthreads();
nTotalThreads = halfPoint; // Reducing the binary tree size by two
}
/* now thread 0 will add up results */
if (threadIdx.x==0) {
sum[0]=tmpsum[0];
sum[1]=tmpsum[1];
}
}
b33ad0b1e7e2b3d4d50bf6ab6cdeae2b95b80505.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @Author: Xu.WANG
* @Date: 2021-02-03 22:59:48
* @LastEditTime: 2021-02-13 23:29:39
* @LastEditors: Xu.WANG
* @Description:
* @FilePath: \Kiri\KiriPBSCuda\src\kiri_pbs_cuda\system\cuda_sph_system.cu
*/
#include <kiri_pbs_cuda/thrust_helper/helper_thrust.cuh>
#include <kiri_pbs_cuda/system/cuda_sph_system.cuh>
#include <kiri_pbs_cuda/system/cuda_sph_system_gpu.cuh>
#include <glad/glad.h>
#include <cuda_gl_interop.h>
// GPU SPH system: owns the fluid/boundary particle sets, the solver and the
// neighbor searchers, plus GL vertex buffers used to expose particle
// positions/colors to the renderer when OpenGL interop is enabled.
namespace KIRI
{
// Constructor: allocate staging buffers and GL VBOs sized for the particle
// count, build the (static) boundary neighbor search, precompute boundary
// volumes (Akinci 2012), set all fluid masses to the rest mass, and run one
// initial update step.
CudaSphSystem::CudaSphSystem(
CudaSphParticlesPtr &fluidParticles,
CudaBoundaryParticlesPtr &boundaryParticles,
CudaBaseSolverPtr &solver,
CudaGNSearcherPtr &searcher,
CudaGNBoundarySearcherPtr &boundarySearcher,
bool openGL)
: mFluids(std::move(fluidParticles)),
mBoundaries(std::move(boundaryParticles)),
mSolver(std::move(solver)),
mSearcher(std::move(searcher)),
mBoundarySearcher(std::move(boundarySearcher)),
bOpenGL(openGL),
mCudaGridSize(CuCeilDiv(mFluids->Size(), KIRI_CUBLOCKSIZE))
{
uint maxNumOfParticles = mFluids->Size();
// NOTE(review): these buffers are overwritten with mapped-VBO addresses in
// UpdateSystemForVBO(), which leaks the allocations below on the first call
// when bOpenGL is set -- confirm whether they are needed at all in that path.
KIRI_CUCALL(hipMalloc((void **)&pptr, sizeof(float4) * maxNumOfParticles));
KIRI_CUCALL(hipMalloc((void **)&cptr, sizeof(float4) * maxNumOfParticles));
// init position vbo
uint bufSize = maxNumOfParticles * sizeof(float4);
glGenBuffers(1, &mPositionsVBO);
glBindBuffer(GL_ARRAY_BUFFER, mPositionsVBO);
glBufferData(GL_ARRAY_BUFFER, bufSize, nullptr, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// init color vbo
uint colorBufSize = maxNumOfParticles * sizeof(float4);
glGenBuffers(1, &mColorsVBO);
glBindBuffer(GL_ARRAY_BUFFER, mColorsVBO);
glBufferData(GL_ARRAY_BUFFER, colorBufSize, nullptr, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// build boundary searcher
mBoundarySearcher->BuildGNSearcher(mBoundaries);
// compute boundary volume(Akinci2012)
ComputeBoundaryVolume();
// init fluid system: every particle starts at the rest mass
thrust::fill(thrust::device, mFluids->GetMassPtr(), mFluids->GetMassPtr() + mFluids->Size(), CUDA_SPH_PARAMS.rest_mass);
if (bOpenGL)
UpdateSystemForVBO();
else
UpdateSystem();
}
// Advance the simulation one step and pack positions/colors directly into the
// mapped GL VBOs.
// NOTE(review): the VBOs are registered and unregistered on every call;
// registering once at startup would avoid per-frame interop overhead.
void CudaSphSystem::UpdateSystemForVBO()
{
KIRI_CUCALL(hipGraphicsGLRegisterBuffer(&mCudaGraphPosVBORes, mPositionsVBO,
hipGraphicsMapFlagsNone));
KIRI_CUCALL(hipGraphicsGLRegisterBuffer(&mCudaGraphColorVBORes, mColorsVBO,
hipGraphicsMapFlagsNone));
size_t numBytes = 0;
KIRI_CUCALL(hipGraphicsMapResources(1, &mCudaGraphPosVBORes, 0));
KIRI_CUCALL(hipGraphicsResourceGetMappedPointer(
(void **)&pptr, &numBytes, mCudaGraphPosVBORes));
size_t colorNumBytes = 0;
KIRI_CUCALL(hipGraphicsMapResources(1, &mCudaGraphColorVBORes, 0));
KIRI_CUCALL(hipGraphicsResourceGetMappedPointer(
(void **)&cptr, &colorNumBytes, mCudaGraphColorVBORes));
UpdateSystem();
CopyGPUData2VBO(pptr, cptr, mFluids);
KIRI_CUCALL(hipGraphicsUnmapResources(1, &mCudaGraphPosVBORes, 0));
KIRI_CUCALL(hipGraphicsUnregisterResource(mCudaGraphPosVBORes));
KIRI_CUCALL(hipGraphicsUnmapResources(1, &mCudaGraphColorVBORes, 0));
KIRI_CUCALL(hipGraphicsUnregisterResource(mCudaGraphColorVBORes));
}
// Launch the packing kernel that copies particle positions/colors into the
// (mapped) VBO arrays.
void CudaSphSystem::CopyGPUData2VBO(float4 *pos, float4 *col, const CudaSphParticlesPtr &fluids)
{
hipLaunchKernelGGL(( CopyGPUData2VBO_CUDA), dim3(mCudaGridSize), dim3(KIRI_CUBLOCKSIZE), 0, 0, pos, col, fluids->GetPosPtr(), fluids->GetColPtr(), fluids->Size(), CUDA_SPH_PARAMS.particle_radius);
KIRI_CUKERNAL();
}
// One-time boundary volume computation over the (static) boundary samples,
// using the boundary searcher's grid for neighbor lookups.
void CudaSphSystem::ComputeBoundaryVolume()
{
auto mCudaBoundaryGridSize = CuCeilDiv(mBoundaries->Size(), KIRI_CUBLOCKSIZE);
hipLaunchKernelGGL(( ComputeBoundaryVolume_CUDA), dim3(mCudaBoundaryGridSize), dim3(KIRI_CUBLOCKSIZE), 0, 0,
mBoundaries->GetPosPtr(),
mBoundaries->GetVolumePtr(),
mBoundaries->Size(),
mBoundarySearcher->GetCellStartPtr(),
mBoundarySearcher->GetGridSize(),
ThrustHelper::Pos2GridXYZ<float3>(mBoundarySearcher->GetLowestPoint(), mBoundarySearcher->GetCellSize(), mBoundarySearcher->GetGridSize()),
ThrustHelper::GridXYZ2GridHash(mBoundarySearcher->GetGridSize()),
Poly6Kernel(mBoundarySearcher->GetCellSize()));
KIRI_CUKERNAL();
}
// Run one simulation step: rebuild the fluid neighbor search, then run the
// solver. Returns the elapsed GPU time in milliseconds, measured with events.
// Exceptions thrown by the solver are caught and logged so a failing step
// does not propagate out of the frame loop.
float CudaSphSystem::UpdateSystem()
{
hipEvent_t start, stop;
KIRI_CUCALL(hipEventCreate(&start));
KIRI_CUCALL(hipEventCreate(&stop));
KIRI_CUCALL(hipEventRecord(start, 0));
mSearcher->BuildGNSearcher(mFluids);
try
{
mSolver->UpdateSolver(
mFluids,
mBoundaries,
mSearcher->GetCellStart(),
mBoundarySearcher->GetCellStart(),
CUDA_SPH_PARAMS,
CUDA_BOUNDARY_PARAMS);
// NOTE(review): the synchronize's return code is ignored; kernel errors
// would only surface via KIRI_CUKERNAL() below.
hipDeviceSynchronize();
KIRI_CUKERNAL();
}
catch (const char *s)
{
std::cout << s << "\n";
}
catch (...)
{
std::cout << "Unknown Exception at " << __FILE__ << ": line " << __LINE__ << "\n";
}
float milliseconds;
KIRI_CUCALL(hipEventRecord(stop, 0));
KIRI_CUCALL(hipEventSynchronize(stop));
KIRI_CUCALL(hipEventElapsedTime(&milliseconds, start, stop));
KIRI_CUCALL(hipEventDestroy(start));
KIRI_CUCALL(hipEventDestroy(stop));
return milliseconds;
}
} // namespace KIRI
| b33ad0b1e7e2b3d4d50bf6ab6cdeae2b95b80505.cu | /*
* @Author: Xu.WANG
* @Date: 2021-02-03 22:59:48
* @LastEditTime: 2021-02-13 23:29:39
* @LastEditors: Xu.WANG
* @Description:
* @FilePath: \Kiri\KiriPBSCuda\src\kiri_pbs_cuda\system\cuda_sph_system.cu
*/
#include <kiri_pbs_cuda/thrust_helper/helper_thrust.cuh>
#include <kiri_pbs_cuda/system/cuda_sph_system.cuh>
#include <kiri_pbs_cuda/system/cuda_sph_system_gpu.cuh>
#include <glad/glad.h>
#include <cuda_gl_interop.h>
// GPU SPH system: owns the fluid/boundary particle sets, the solver and the
// neighbor searchers, plus GL vertex buffers used to expose particle
// positions/colors to the renderer when OpenGL interop is enabled.
namespace KIRI
{
// Constructor: allocate staging buffers and GL VBOs sized for the particle
// count, build the (static) boundary neighbor search, precompute boundary
// volumes (Akinci 2012), set all fluid masses to the rest mass, and run one
// initial update step.
CudaSphSystem::CudaSphSystem(
CudaSphParticlesPtr &fluidParticles,
CudaBoundaryParticlesPtr &boundaryParticles,
CudaBaseSolverPtr &solver,
CudaGNSearcherPtr &searcher,
CudaGNBoundarySearcherPtr &boundarySearcher,
bool openGL)
: mFluids(std::move(fluidParticles)),
mBoundaries(std::move(boundaryParticles)),
mSolver(std::move(solver)),
mSearcher(std::move(searcher)),
mBoundarySearcher(std::move(boundarySearcher)),
bOpenGL(openGL),
mCudaGridSize(CuCeilDiv(mFluids->Size(), KIRI_CUBLOCKSIZE))
{
uint maxNumOfParticles = mFluids->Size();
// NOTE(review): these buffers are overwritten with mapped-VBO addresses in
// UpdateSystemForVBO(), which leaks the allocations below on the first call
// when bOpenGL is set -- confirm whether they are needed at all in that path.
KIRI_CUCALL(cudaMalloc((void **)&pptr, sizeof(float4) * maxNumOfParticles));
KIRI_CUCALL(cudaMalloc((void **)&cptr, sizeof(float4) * maxNumOfParticles));
// init position vbo
uint bufSize = maxNumOfParticles * sizeof(float4);
glGenBuffers(1, &mPositionsVBO);
glBindBuffer(GL_ARRAY_BUFFER, mPositionsVBO);
glBufferData(GL_ARRAY_BUFFER, bufSize, nullptr, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// init color vbo
uint colorBufSize = maxNumOfParticles * sizeof(float4);
glGenBuffers(1, &mColorsVBO);
glBindBuffer(GL_ARRAY_BUFFER, mColorsVBO);
glBufferData(GL_ARRAY_BUFFER, colorBufSize, nullptr, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// build boundary searcher
mBoundarySearcher->BuildGNSearcher(mBoundaries);
// compute boundary volume(Akinci2012)
ComputeBoundaryVolume();
// init fluid system: every particle starts at the rest mass
thrust::fill(thrust::device, mFluids->GetMassPtr(), mFluids->GetMassPtr() + mFluids->Size(), CUDA_SPH_PARAMS.rest_mass);
if (bOpenGL)
UpdateSystemForVBO();
else
UpdateSystem();
}
// Advance the simulation one step and pack positions/colors directly into the
// mapped GL VBOs.
// NOTE(review): the VBOs are registered and unregistered on every call;
// registering once at startup would avoid per-frame interop overhead.
void CudaSphSystem::UpdateSystemForVBO()
{
KIRI_CUCALL(cudaGraphicsGLRegisterBuffer(&mCudaGraphPosVBORes, mPositionsVBO,
cudaGraphicsMapFlagsNone));
KIRI_CUCALL(cudaGraphicsGLRegisterBuffer(&mCudaGraphColorVBORes, mColorsVBO,
cudaGraphicsMapFlagsNone));
size_t numBytes = 0;
KIRI_CUCALL(cudaGraphicsMapResources(1, &mCudaGraphPosVBORes, 0));
KIRI_CUCALL(cudaGraphicsResourceGetMappedPointer(
(void **)&pptr, &numBytes, mCudaGraphPosVBORes));
size_t colorNumBytes = 0;
KIRI_CUCALL(cudaGraphicsMapResources(1, &mCudaGraphColorVBORes, 0));
KIRI_CUCALL(cudaGraphicsResourceGetMappedPointer(
(void **)&cptr, &colorNumBytes, mCudaGraphColorVBORes));
UpdateSystem();
CopyGPUData2VBO(pptr, cptr, mFluids);
KIRI_CUCALL(cudaGraphicsUnmapResources(1, &mCudaGraphPosVBORes, 0));
KIRI_CUCALL(cudaGraphicsUnregisterResource(mCudaGraphPosVBORes));
KIRI_CUCALL(cudaGraphicsUnmapResources(1, &mCudaGraphColorVBORes, 0));
KIRI_CUCALL(cudaGraphicsUnregisterResource(mCudaGraphColorVBORes));
}
// Launch the packing kernel that copies particle positions/colors into the
// (mapped) VBO arrays.
void CudaSphSystem::CopyGPUData2VBO(float4 *pos, float4 *col, const CudaSphParticlesPtr &fluids)
{
CopyGPUData2VBO_CUDA<<<mCudaGridSize, KIRI_CUBLOCKSIZE>>>(pos, col, fluids->GetPosPtr(), fluids->GetColPtr(), fluids->Size(), CUDA_SPH_PARAMS.particle_radius);
KIRI_CUKERNAL();
}
// One-time boundary volume computation over the (static) boundary samples,
// using the boundary searcher's grid for neighbor lookups.
void CudaSphSystem::ComputeBoundaryVolume()
{
auto mCudaBoundaryGridSize = CuCeilDiv(mBoundaries->Size(), KIRI_CUBLOCKSIZE);
ComputeBoundaryVolume_CUDA<<<mCudaBoundaryGridSize, KIRI_CUBLOCKSIZE>>>(
mBoundaries->GetPosPtr(),
mBoundaries->GetVolumePtr(),
mBoundaries->Size(),
mBoundarySearcher->GetCellStartPtr(),
mBoundarySearcher->GetGridSize(),
ThrustHelper::Pos2GridXYZ<float3>(mBoundarySearcher->GetLowestPoint(), mBoundarySearcher->GetCellSize(), mBoundarySearcher->GetGridSize()),
ThrustHelper::GridXYZ2GridHash(mBoundarySearcher->GetGridSize()),
Poly6Kernel(mBoundarySearcher->GetCellSize()));
KIRI_CUKERNAL();
}
// Run one simulation step: rebuild the fluid neighbor search, then run the
// solver. Returns the elapsed GPU time in milliseconds, measured with events.
// Exceptions thrown by the solver are caught and logged so a failing step
// does not propagate out of the frame loop.
float CudaSphSystem::UpdateSystem()
{
cudaEvent_t start, stop;
KIRI_CUCALL(cudaEventCreate(&start));
KIRI_CUCALL(cudaEventCreate(&stop));
KIRI_CUCALL(cudaEventRecord(start, 0));
mSearcher->BuildGNSearcher(mFluids);
try
{
mSolver->UpdateSolver(
mFluids,
mBoundaries,
mSearcher->GetCellStart(),
mBoundarySearcher->GetCellStart(),
CUDA_SPH_PARAMS,
CUDA_BOUNDARY_PARAMS);
// NOTE(review): the synchronize's return code is ignored; kernel errors
// would only surface via KIRI_CUKERNAL() below.
cudaDeviceSynchronize();
KIRI_CUKERNAL();
}
catch (const char *s)
{
std::cout << s << "\n";
}
catch (...)
{
std::cout << "Unknown Exception at " << __FILE__ << ": line " << __LINE__ << "\n";
}
float milliseconds;
KIRI_CUCALL(cudaEventRecord(stop, 0));
KIRI_CUCALL(cudaEventSynchronize(stop));
KIRI_CUCALL(cudaEventElapsedTime(&milliseconds, start, stop));
KIRI_CUCALL(cudaEventDestroy(start));
KIRI_CUCALL(cudaEventDestroy(stop));
return milliseconds;
}
} // namespace KIRI
|
b47f407a776add56caf4a62f2d3ce8e585c27aaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "HeldKarp.cuh"
/* Returns the index of the subset in the memo array.
 *
 * The index is built combinatorially: with the first element fixed (the
 * source, 0), fixing the first `lowest` values of a sorted subset leaves
 * 2^(size - lowest - 1) possible subsets, so that count is accumulated for
 * every smaller leading value skipped over. This yields a unique index in
 * O(|set| + size) steps.
 *
 * Parameters:
 *     set - the set of numbers to get an index for; the source point should
 *           be 0
 *     size - the number of points in the whole TSP algorithm (assumed < 32
 *            so the 2^k terms fit in an int -- TODO confirm upstream bound)
 */
__device__
int cudaGetSetIndex(Set set, int size) {
// Sort the list so the subset has a canonical (ascending) order.
set.sort();
// Running index accumulated below.
int memoIndex = 0;
// Smallest value not yet accounted for. Start at 1: the smallest subset in
// this problem has two elements, so every set has a value >= 1.
int lowest = 1;
// Position in the sorted set; index 0 is always the fixed source point.
int setIndex = 1;
while (1) {
// Skip every subset whose next element is smaller than set[setIndex];
// each skipped leading value accounts for 2^(size - lowest - 1) subsets.
// An integer shift replaces the previous powf(2, ...): it is exact (the
// old int -> float -> int round-trip loses precision once memoIndex
// exceeds 2^24) and avoids a slow device pow call.
for (; lowest < set[setIndex]; lowest++)
memoIndex += 1 << (size - lowest - 1);
// Don't double-check the value we just matched.
lowest++;
setIndex++;
// Done once every element of the set has been consumed.
if (set.nValues == setIndex)
return memoIndex;
// Increment the memo index because of a zero case that occurs if the
// next iteration is what was guessed.
memoIndex++;
}
}
/**
 * Computes the full nPoints x nPoints matrix of pairwise distances, stored
 * row-major: distances[row * nPoints + col] is the distance from point `row`
 * to point `col`. Grid-stride loop: any launch configuration covers all
 * nPoints^2 entries.
 *
 * points - List of x, y coordinates of points to find distances between.
 * nPoints - Number of points
 * distances - Output array of nPoints * nPoints distances.
 */
__global__
void cudaGetDistances(Point2D *points, int nPoints, float *distances) {
// Get the index of the thread so we only iterate part of the data.
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Variables when filling in the distances array
int row, col;
while (tid < (nPoints * nPoints)) {
// The row and column can be determined from mod and division
row = tid / nPoints;
col = tid % nPoints;
// Get Euclidean distance and put it into the array.
distances[tid] = points[row].distanceTo(points[col]);
// Advance thread index.
tid += blockDim.x * gridDim.x;
}
}
// Host wrapper: launches cudaGetDistances with the given configuration to
// fill the nPoints x nPoints row-major distance matrix.
void cudaCallGetDistances(int nBlocks,
int threadsPerBlock,
Point2D *points,
int nPoints,
float *distances) {
// Number of bytes of shared memory (none needed by this kernel).
int shmem = 0;
// Fill in all of the distances between two points.
hipLaunchKernelGGL(( cudaGetDistances), dim3(nBlocks), dim3(threadsPerBlock), shmem, 0, points, nPoints, distances);
}
/**
 * Gets the first rows of the memoization array so the rest of the algorithm
 * can run: one row for every two-element subset {0, tid}, where the first
 * element is always the source point.
 *
 * memoArray - The memoization array whose first rows will be initialized
 * points - The (x, y) coordinates of points that will be memoized.
 * nPoints - The number of points
 * distances - Distances between every pair of two points (row-major).
 */
__global__
void cudaInitializeMemoArray(HeldKarpMemoArray memoArray,
Point2D *points,
int nPoints,
float *distances) {
// Get the index of the thread so we only iterate part of the data.
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We don't care about the 0 to 0 case, so we will skip right away if
// tid is 0.
tid = (tid == 0 ? blockDim.x * gridDim.x : tid);
while (tid < nPoints) {
// Create a length two subset with the source as the first point.
// (tid is unsigned but < nPoints here, so the conversion to int is safe.)
int setPoints[2] = { 0, tid };
// Memoize the "shortest distance" as the direct 0 -> tid distance;
// distances[tid] is row 0, column tid of the row-major matrix.
memoArray[cudaGetSetIndex(Set(setPoints, 2), nPoints)].updateRow(tid, distances[tid], 0);
// Advance thread index.
tid += blockDim.x * gridDim.x;
}
}
// Host wrapper: seeds the memo array with all {0, i} two-point subsets.
void cudaCallInitializeMemoArray(int nBlocks,
int threadsPerBlock,
HeldKarpMemoArray memoArray,
Point2D *points,
int nPoints,
float *distances) {
// Number of bytes of shared memory (none needed by this kernel).
int shmem = 0;
// Initialize the memo array with subsets of length 2.
hipLaunchKernelGGL(( cudaInitializeMemoArray), dim3(nBlocks), dim3(threadsPerBlock), shmem, 0,
memoArray, points, nPoints, distances);
}
/**
 * For the given subset, evaluates every (m, k) candidate -- a path ending at
 * set[k] whose previous point is set[m] -- folds per-k minima into `mins`,
 * and finally writes each minimum into the memo array row for `set`.
 *
 * NOTE(review): hazards to confirm against the launch site:
 *   - `mins[k]` is read-modify-written by multiple threads without atomics,
 *     and `firstIteration` is per-thread, so `mins[k].dist` can be compared
 *     against a value still in flux (or never initialized for some k).
 *   - __syncthreads() only synchronizes one block, while the grid-stride
 *     loop spans gridDim.x blocks; this is only safe with a single block.
 *   - `distances` is filled row-major (row * nPoints + col) by
 *     cudaGetDistances, but is indexed here as newSet[m] + set[k]; this
 *     looks like it should be newSet[m] * nPoints + set[k].
 *
 * set - The set of points to find the paths between.
 * memoArray - The memoization array from which to draw information.
 * distances - Distances between every pair of two points.
 * nPoints - Number of points.
 * mins - Array of distance/previous pairs that is filled by this function
 *        and then written back into the memo array.
 */
__global__
void cudaHeldKarpKernel(Set set,
HeldKarpMemoArray memoArray,
float *distances,
int nPoints,
HeldKarpMemo *mins) {
// Get the index of the thread so we only iterate part of the data.
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Per-thread flag: true until this thread stores its first candidate.
int firstIteration = 1;
// Variables required.
int m, k;
// Finding every combination of m and k in one motion. We will store the
// distance and prev for each of these combinations, then another kernel
// will find the minimum of all of these values for each k. For more info
// on m and k, refer to HeldKarp.cc. We will treat the mins array as an
// array with set.nValues rows and set.nValues - 1 columns.
while (tid < (set.nValues * set.nValues)) {
// Get k and m from the tid
k = tid / set.nValues; // Index of value subtracting from set
m = tid % set.nValues; // Value asserting as last in set
// We never want 0 to be last, and last can't also be removed from set
if (m != k && set[m] != 0 && set[k] != 0) {
// Remove k from set to look at shortest path ending in m, k
Set newSet = set - set[k];
// Store the distance and prev in mins to get the min later.
HeldKarpMemoRow memo = memoArray[cudaGetSetIndex(newSet, nPoints)];
if ((memo[newSet[m]].dist + distances[newSet[m] + set[k]] < mins[k].dist) ||
firstIteration == 1) {
// Second clause would be the first iteration, in which we save
// no matter what
mins[k].dist = memo[newSet[m]].dist + distances[newSet[m] + set[k]];
mins[k].prev = newSet[m];
}
firstIteration = 0;
}
// Advance thread index.
tid += blockDim.x * gridDim.x;
}
__syncthreads();
// Every thread repeats this write loop (redundant stores of the same row);
// see the synchronization note in the header.
for (int k = 0; k < set.nValues; k++) {
memoArray[cudaGetSetIndex(set, nPoints)].updateRow(set[k], mins[k].dist, mins[k].prev);
}
}
// Host wrapper: launches cudaHeldKarpKernel for one subset. `mins` must hold
// at least set.nValues entries.
void cudaCallHeldKarpKernel(int nBlocks,
int threadsPerBlock,
Set set,
HeldKarpMemoArray memoArray,
float *distances,
int nPoints,
HeldKarpMemo *mins) {
hipLaunchKernelGGL(( cudaHeldKarpKernel), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, \
set, memoArray, distances, nPoints, mins);
}
| b47f407a776add56caf4a62f2d3ce8e585c27aaa.cu |
#include <cmath>
#include "HeldKarp.cuh"
/* Returns the index of the subset in the memo array.
 *
 * The index is built combinatorially: with the first element fixed (the
 * source, 0), fixing the first `lowest` values of a sorted subset leaves
 * 2^(size - lowest - 1) possible subsets, so that count is accumulated for
 * every smaller leading value skipped over. This yields a unique index in
 * O(|set| + size) steps.
 *
 * Parameters:
 *     set - the set of numbers to get an index for; the source point should
 *           be 0
 *     size - the number of points in the whole TSP algorithm (assumed < 32
 *            so the 2^k terms fit in an int -- TODO confirm upstream bound)
 */
__device__
int cudaGetSetIndex(Set set, int size) {
// Sort the list so the subset has a canonical (ascending) order.
set.sort();
// Running index accumulated below.
int memoIndex = 0;
// Smallest value not yet accounted for. Start at 1: the smallest subset in
// this problem has two elements, so every set has a value >= 1.
int lowest = 1;
// Position in the sorted set; index 0 is always the fixed source point.
int setIndex = 1;
while (1) {
// Skip every subset whose next element is smaller than set[setIndex];
// each skipped leading value accounts for 2^(size - lowest - 1) subsets.
// An integer shift replaces the previous powf(2, ...): it is exact (the
// old int -> float -> int round-trip loses precision once memoIndex
// exceeds 2^24) and avoids a slow device pow call.
for (; lowest < set[setIndex]; lowest++)
memoIndex += 1 << (size - lowest - 1);
// Don't double-check the value we just matched.
lowest++;
setIndex++;
// Done once every element of the set has been consumed.
if (set.nValues == setIndex)
return memoIndex;
// Increment the memo index because of a zero case that occurs if the
// next iteration is what was guessed.
memoIndex++;
}
}
/**
 * Computes the full nPoints x nPoints matrix of pairwise distances, stored
 * row-major: distances[row * nPoints + col] is the distance from point `row`
 * to point `col`. Grid-stride loop: any launch configuration covers all
 * nPoints^2 entries.
 *
 * points - List of x, y coordinates of points to find distances between.
 * nPoints - Number of points
 * distances - Output array of nPoints * nPoints distances.
 */
__global__
void cudaGetDistances(Point2D *points, int nPoints, float *distances) {
// Get the index of the thread so we only iterate part of the data.
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Variables when filling in the distances array
int row, col;
while (tid < (nPoints * nPoints)) {
// The row and column can be determined from mod and division
row = tid / nPoints;
col = tid % nPoints;
// Get Euclidean distance and put it into the array.
distances[tid] = points[row].distanceTo(points[col]);
// Advance thread index.
tid += blockDim.x * gridDim.x;
}
}
// Host wrapper: launches cudaGetDistances with the given configuration to
// fill the nPoints x nPoints row-major distance matrix.
void cudaCallGetDistances(int nBlocks,
int threadsPerBlock,
Point2D *points,
int nPoints,
float *distances) {
// Number of bytes of shared memory (none needed by this kernel).
int shmem = 0;
// Fill in all of the distances between two points.
cudaGetDistances<<<nBlocks, threadsPerBlock, shmem>>>(points, nPoints, distances);
}
/**
 * Gets the first rows of the memoization array so the rest of the algorithm
 * can run: one row for every two-element subset {0, tid}, where the first
 * element is always the source point.
 *
 * memoArray - The memoization array whose first rows will be initialized
 * points - The (x, y) coordinates of points that will be memoized.
 * nPoints - The number of points
 * distances - Distances between every pair of two points (row-major).
 */
__global__
void cudaInitializeMemoArray(HeldKarpMemoArray memoArray,
Point2D *points,
int nPoints,
float *distances) {
// Get the index of the thread so we only iterate part of the data.
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We don't care about the 0 to 0 case, so we will skip right away if
// tid is 0.
tid = (tid == 0 ? blockDim.x * gridDim.x : tid);
while (tid < nPoints) {
// Create a length two subset with the source as the first point.
// (tid is unsigned but < nPoints here, so the conversion to int is safe.)
int setPoints[2] = { 0, tid };
// Memoize the "shortest distance" as the direct 0 -> tid distance;
// distances[tid] is row 0, column tid of the row-major matrix.
memoArray[cudaGetSetIndex(Set(setPoints, 2), nPoints)].updateRow(tid, distances[tid], 0);
// Advance thread index.
tid += blockDim.x * gridDim.x;
}
}
// Host wrapper: seeds the memo array with all {0, i} two-point subsets.
void cudaCallInitializeMemoArray(int nBlocks,
int threadsPerBlock,
HeldKarpMemoArray memoArray,
Point2D *points,
int nPoints,
float *distances) {
// Number of bytes of shared memory (none needed by this kernel).
int shmem = 0;
// Initialize the memo array with subsets of length 2.
cudaInitializeMemoArray<<<nBlocks, threadsPerBlock, shmem>>>
(memoArray, points, nPoints, distances);
}
/**
 * For the given subset, evaluates every (m, k) candidate -- a path ending at
 * set[k] whose previous point is set[m] -- folds per-k minima into `mins`,
 * and finally writes each minimum into the memo array row for `set`.
 *
 * NOTE(review): hazards to confirm against the launch site:
 *   - `mins[k]` is read-modify-written by multiple threads without atomics,
 *     and `firstIteration` is per-thread, so `mins[k].dist` can be compared
 *     against a value still in flux (or never initialized for some k).
 *   - __syncthreads() only synchronizes one block, while the grid-stride
 *     loop spans gridDim.x blocks; this is only safe with a single block.
 *   - `distances` is filled row-major (row * nPoints + col) by
 *     cudaGetDistances, but is indexed here as newSet[m] + set[k]; this
 *     looks like it should be newSet[m] * nPoints + set[k].
 *
 * set - The set of points to find the paths between.
 * memoArray - The memoization array from which to draw information.
 * distances - Distances between every pair of two points.
 * nPoints - Number of points.
 * mins - Array of distance/previous pairs that is filled by this function
 *        and then written back into the memo array.
 */
__global__
void cudaHeldKarpKernel(Set set,
HeldKarpMemoArray memoArray,
float *distances,
int nPoints,
HeldKarpMemo *mins) {
// Get the index of the thread so we only iterate part of the data.
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Per-thread flag: true until this thread stores its first candidate.
int firstIteration = 1;
// Variables required.
int m, k;
// Finding every combination of m and k in one motion. We will store the
// distance and prev for each of these combinations, then another kernel
// will find the minimum of all of these values for each k. For more info
// on m and k, refer to HeldKarp.cc. We will treat the mins array as an
// array with set.nValues rows and set.nValues - 1 columns.
while (tid < (set.nValues * set.nValues)) {
// Get k and m from the tid
k = tid / set.nValues; // Index of value subtracting from set
m = tid % set.nValues; // Value asserting as last in set
// We never want 0 to be last, and last can't also be removed from set
if (m != k && set[m] != 0 && set[k] != 0) {
// Remove k from set to look at shortest path ending in m, k
Set newSet = set - set[k];
// Store the distance and prev in mins to get the min later.
HeldKarpMemoRow memo = memoArray[cudaGetSetIndex(newSet, nPoints)];
if ((memo[newSet[m]].dist + distances[newSet[m] + set[k]] < mins[k].dist) ||
firstIteration == 1) {
// Second clause would be the first iteration, in which we save
// no matter what
mins[k].dist = memo[newSet[m]].dist + distances[newSet[m] + set[k]];
mins[k].prev = newSet[m];
}
firstIteration = 0;
}
// Advance thread index.
tid += blockDim.x * gridDim.x;
}
__syncthreads();
// Every thread repeats this write loop (redundant stores of the same row);
// see the synchronization note in the header.
for (int k = 0; k < set.nValues; k++) {
memoArray[cudaGetSetIndex(set, nPoints)].updateRow(set[k], mins[k].dist, mins[k].prev);
}
}
// Host wrapper: launches cudaHeldKarpKernel for one subset. `mins` must hold
// at least set.nValues entries.
void cudaCallHeldKarpKernel(int nBlocks,
int threadsPerBlock,
Set set,
HeldKarpMemoArray memoArray,
float *distances,
int nPoints,
HeldKarpMemo *mins) {
cudaHeldKarpKernel<<<nBlocks, threadsPerBlock>>> \
(set, memoArray, distances, nPoints, mins);
}
|
2ea8d728044c51ed74cb4c237da7a3e0de955114.hip | // !!! This is a file automatically generated by hipify!!!
#include "vv.h"
#include "integration_kernel.h"
#include "forcing_terms/none.h"
#include "forcing_terms/const_dp.h"
#include "forcing_terms/periodic_poiseuille.h"
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/reflection.h>
// Integrator (named VV) parameterized by a forcing-term functor that can
// modify the per-particle force before the velocity/position update; see the
// scheme documented above execute().
namespace mirheo
{
// Construct with an explicit forcing term instance.
template<class ForcingTerm>
IntegratorVV<ForcingTerm>::IntegratorVV(const MirState *state, const std::string& name, ForcingTerm forcingTerm) :
Integrator(state, name),
forcingTerm_(forcingTerm)
{}
// Snapshot-loading constructor: reads the name and the forcing term back
// from a saved config object.
template<class ForcingTerm>
IntegratorVV<ForcingTerm>::IntegratorVV(const MirState *state, Loader& loader,
const ConfigObject& object) :
IntegratorVV(state, (const std::string&)object["name"],
loader.load<ForcingTerm>(object["forcingTerm"]))
{}
// Defaulted destructor, defined out of line.
template<class ForcingTerm>
IntegratorVV<ForcingTerm>::~IntegratorVV() = default;
// Register this object with the saver under a type name that encodes the
// forcing-term template argument.
template<class ForcingTerm>
void IntegratorVV<ForcingTerm>::saveSnapshotAndRegister(Saver& saver)
{
std::string typeName = constructTypeName<ForcingTerm>("IntegratorVV");
saver.registerObject<IntegratorVV<ForcingTerm>>(
this, _saveSnapshot(saver, typeName));
}
// Extend the base Integrator snapshot with the serialized forcing term.
template<class ForcingTerm>
ConfigObject IntegratorVV<ForcingTerm>::_saveSnapshot(
Saver& saver, const std::string& typeName)
{
ConfigObject config = Integrator::_saveSnapshot(saver, typeName);
config.emplace("forcingTerm", saver(forcingTerm_));
return config;
}
/**
 * The new coordinates and velocities of a particle will be computed
 * as follows:
 * \f$
 * \begin{cases}
 * f'_p = ForcingTerm(f_p, x_p, v_p) \\
 * v_{new} = v_p + \dfrac{f'_p}{m_p} \delta t \\
 * x_{new} = x_p + v_{new} \, \delta t
 * \end{cases}
 * \f$
 *
 * @tparam ForcingTerm is a functor that can modify computed force
 * per particles (typically add some force field). It has to
 * provide two functions:
 * - This function will be called once before integration and
 * allows the functor to obtain required variables or data
 * channels from the ParticleVector:
 * \code setup(ParticleVector* pv, real t) \endcode
 *
 * - This should be a \c \_\_device\_\_ operator that modifies
 * the force. It will be called for each particle during the
 * integration:
 * \code real3 operator()(real3 f0, Particle p) const \endcode
 *
 */
template<class ForcingTerm>
void IntegratorVV<ForcingTerm>::execute(ParticleVector *pv, hipStream_t stream)
{
const auto t = static_cast<real>(getState()->currentTime);
const auto dt = static_cast<real>(getState()->dt);
// Compile-time check of the forcing-term contract described above.
static_assert(std::is_same<decltype(forcingTerm_.setup(pv, t)), void>::value,
"Forcing term functor must provide member"
"void setup(ParticleVector*, real)");
// The lambda below captures fterm by value, so a copy of the (set-up)
// functor travels to the device.
auto& fterm = forcingTerm_;
fterm.setup(pv, t);
// Per-particle update: apply the forcing term to the force, then advance
// velocity and position (scheme documented above).
auto st2 = [fterm] __device__ (Particle& p, real3 f, real invm, real dt)
{
const real3 modF = fterm(f, p);
p.u += modF * invm * dt;
p.r += p.u * dt;
};
integrate(pv, dt, st2, stream);
// Positions changed -- presumably invalidates cached per-PV state such as
// cell lists; confirm against invalidatePV_'s definition.
invalidatePV_(pv);
}
// Explicit instantiations for the supported forcing terms.
template class IntegratorVV<ForcingTermNone>;
template class IntegratorVV<ForcingTermConstDP>;
template class IntegratorVV<ForcingTermPeriodicPoiseuille>;
} // namespace mirheo
| 2ea8d728044c51ed74cb4c237da7a3e0de955114.cu | #include "vv.h"
#include "integration_kernel.h"
#include "forcing_terms/none.h"
#include "forcing_terms/const_dp.h"
#include "forcing_terms/periodic_poiseuille.h"
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/reflection.h>
namespace mirheo
{
/// Construct a velocity-Verlet integrator; \p forcingTerm is copied and
/// later applied to the computed force of every particle during execute().
template<class ForcingTerm>
IntegratorVV<ForcingTerm>::IntegratorVV(const MirState *state, const std::string& name, ForcingTerm forcingTerm) :
Integrator(state, name),
forcingTerm_(forcingTerm)
{}
/// Snapshot-loading constructor: restores the integrator name and the
/// forcing term from \p object and delegates to the main constructor.
template<class ForcingTerm>
IntegratorVV<ForcingTerm>::IntegratorVV(const MirState *state, Loader& loader,
const ConfigObject& object) :
IntegratorVV(state, (const std::string&)object["name"],
loader.load<ForcingTerm>(object["forcingTerm"]))
{}
// Defaulted out-of-line so the destructor is emitted with the explicit
// instantiations below.
template<class ForcingTerm>
IntegratorVV<ForcingTerm>::~IntegratorVV() = default;
/// Save this integrator's snapshot and register the object with \p saver
/// under a type name that encodes the concrete forcing term.
template<class ForcingTerm>
void IntegratorVV<ForcingTerm>::saveSnapshotAndRegister(Saver& saver)
{
std::string typeName = constructTypeName<ForcingTerm>("IntegratorVV");
saver.registerObject<IntegratorVV<ForcingTerm>>(
this, _saveSnapshot(saver, typeName));
}
/// Extend the base Integrator snapshot with the serialized forcing term.
/// @return the completed config object for this integrator.
template<class ForcingTerm>
ConfigObject IntegratorVV<ForcingTerm>::_saveSnapshot(
Saver& saver, const std::string& typeName)
{
ConfigObject config = Integrator::_saveSnapshot(saver, typeName);
config.emplace("forcingTerm", saver(forcingTerm_));
return config;
}
/**
* The new coordinates and velocities of a particle will be computed
* as follows:
* \f$
* \begin{cases}
* f'_p = ForcingTerm(f_p, x_p, v_p) \\
* v_{new} = v_p + \dfrac{f'_p}{m_p} \delta t \\
* x_{new} = x_p + v_{new} \, \delta t
* \end{cases}
* \f$
*
* @tparam ForcingTerm is a functor that can modify computed force
* per particles (typically add some force field). It has to
* provide two functions:
* - This function will be called once before integration and
* allows the functor to obtain required variables or data
* channels from the ParticleVector:
* \code setup(ParticleVector* pv, real t) \endcode
*
* - This should be a \c \_\_device\_\_ operator that modifies
* the force. It will be called for each particle during the
* integration:
* \code real3 operator()(real3 f0, Particle p) const \endcode
*
*/
template<class ForcingTerm>
void IntegratorVV<ForcingTerm>::execute(ParticleVector *pv, cudaStream_t stream)
{
    const auto t  = static_cast<real>(getState()->currentTime);
    const auto dt = static_cast<real>(getState()->dt);

    // Compile-time check that the forcing term exposes the required
    // `void setup(ParticleVector*, real)` hook.
    // (Fixed: the two message literals previously concatenated to
    // "membervoid setup" because the first one lacked a trailing space.)
    static_assert(std::is_same<decltype(forcingTerm_.setup(pv, t)), void>::value,
                  "Forcing term functor must provide member "
                  "void setup(ParticleVector*, real)");

    auto& fterm = forcingTerm_;
    fterm.setup(pv, t);

    // Per-particle update: v += F'/m * dt, then x += v * dt, where F' is the
    // raw force after being transformed by the forcing-term functor.
    auto st2 = [fterm] __device__ (Particle& p, real3 f, real invm, real dt)
    {
        const real3 modF = fterm(f, p);

        p.u += modF * invm * dt;
        p.r += p.u * dt;
    };

    integrate(pv, dt, st2, stream);
    invalidatePV_(pv);
}
// Explicit instantiations for every forcing term supported by the build.
template class IntegratorVV<ForcingTermNone>;
template class IntegratorVV<ForcingTermConstDP>;
template class IntegratorVV<ForcingTermPeriodicPoiseuille>;
} // namespace mirheo
|
2c505ed16d446cea432be56385efd02988363f0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
// const defines
#define NBIN 1000000000
#define NUM_BLOCK 4
#define NUM_THREAD 16
// struct to get time
struct timeval current_time = {0,0};
int tid;
float pi = 0, time_elapsed;
uint begin, end;
// function to calculate pi
// Rectangle-rule integration of 4/(1+x^2) over [0,1): each thread walks a
// strided subset of the nbin sample points (stride = total thread count)
// and accumulates its partial sum into sum[idx].
// NOTE(review): the kernel only does `+=` -- it assumes sum[] was zeroed by
// the caller before launch; hipMallocManaged does not guarantee that.
__global__ void cal_pi(double *sum, int nbin, double step, int nthreads, int nblocks)
{
// var declaration
int i;
double x;
// flat global thread index; also the slot this thread accumulates into
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// pi equation calculation: sample midpoint x = i*step, add 4/(1+x^2)
for (i=idx; i< nbin; i+=nthreads*nblocks) {
x = i*step;
sum[idx] += double(4.0/(1.0+(x*x)));
}
}
// Driver: launch cal_pi, reduce the per-thread partial sums on the host,
// print pi and append a "<threads>;<blocks>;<pi>;<seconds>" record to the log.
int main()
{
    // var declaration
    double *sumDev;
    double step = 1.0/NBIN; // dx
    // One accumulator slot per thread. sumDev holds doubles, so the buffer
    // must be sized with sizeof(double); the previous sizeof(float) sizing
    // under-allocated it and the kernel wrote past the end of the buffer.
    size_t size = NUM_BLOCK*NUM_THREAD*sizeof(double);
    // initializing file in append mode to insert experiment data
    FILE *f = fopen("/home/aac-pc/Daniel/experimental-log.txt", "a");
    if (f == NULL) {
        printf("could not open experimental-log.txt\n");
        return 1;
    }
    // alloc space for the accumulators (managed: visible to host and device)
    hipMallocManaged(&sumDev, size);
    // the kernel only accumulates (+=); managed memory is not guaranteed to
    // be zeroed, so clear the partial sums explicitly before launch
    for (tid = 0; tid < NUM_THREAD*NUM_BLOCK; tid++) {
        sumDev[tid] = 0.0;
    }
    // get initial time to evaluate performance
    gettimeofday(&current_time, NULL);
    begin = current_time.tv_sec*1000000 + current_time.tv_usec;
    // call function to calculate pi in threads
    hipLaunchKernelGGL(( cal_pi), dim3(NUM_BLOCK), dim3(NUM_THREAD), 0, 0, sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK);
    // synchronize so the managed buffer is safe to read from the host
    hipDeviceSynchronize();
    // get final time to evaluate performance
    gettimeofday(&current_time, NULL);
    end = current_time.tv_sec*1000000 + current_time.tv_usec;
    time_elapsed = end - begin;
    // reduce per-thread partial sums, then scale by dx
    for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++){
        pi += sumDev[tid];
    }
    pi *= step;
    // print final value in console and save data info in log file
    printf("PI = %f\n",pi);
    fprintf(f, "%d;%d;%f;%f\n", NUM_THREAD, NUM_BLOCK, pi, (time_elapsed/1000000));
    // free cuda var
    hipFree(sumDev);
    // close file
    fclose(f);
    return 0;
} | 2c505ed16d446cea432be56385efd02988363f0b.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
// const defines
#define NBIN 1000000000
#define NUM_BLOCK 4
#define NUM_THREAD 16
// struct to get time
struct timeval current_time = {0,0};
int tid;
float pi = 0, time_elapsed;
uint begin, end;
// function to calculate pi
// Rectangle-rule integration of 4/(1+x^2) over [0,1): each thread walks a
// strided subset of the nbin sample points (stride = total thread count)
// and accumulates its partial sum into sum[idx].
// NOTE(review): the kernel only does `+=` -- it assumes sum[] was zeroed by
// the caller before launch; cudaMallocManaged does not guarantee that.
__global__ void cal_pi(double *sum, int nbin, double step, int nthreads, int nblocks)
{
// var declaration
int i;
double x;
// flat global thread index; also the slot this thread accumulates into
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// pi equation calculation: sample midpoint x = i*step, add 4/(1+x^2)
for (i=idx; i< nbin; i+=nthreads*nblocks) {
x = i*step;
sum[idx] += double(4.0/(1.0+(x*x)));
}
}
int main()
{
// var declaration
double *sumDev;
double step = 1.0/NBIN; // dx
size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float);
// initializing file in append mode to insert experiment data
FILE *f = fopen("/home/aac-pc/Daniel/experimental-log.txt", "a");
// alooc space to acc variable
cudaMallocManaged(&sumDev, size);
// get initial time to evaluate performance
gettimeofday(¤t_time, NULL);
begin = current_time.tv_sec*1000000 + current_time.tv_usec;
// call function to calculate pi in threads
cal_pi<<<NUM_BLOCK, NUM_THREAD>>>(sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK);
// synchronize threads
cudaDeviceSynchronize();
// get final time to evaluate performance
gettimeofday(¤t_time, NULL);
end = current_time.tv_sec*1000000 + current_time.tv_usec;
time_elapsed = end - begin;
// calculate pi final value
for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++){
pi += sumDev[tid];
}
pi *= step;
// print final value in console and save data info in log file
printf("PI = %f\n",pi);
fprintf(f, "%d;%d;%f;%f\n", NUM_THREAD, NUM_BLOCK, pi, (time_elapsed/1000000));
// free cuda var
cudaFree(sumDev);
// close file
fclose(f);
return 0;
} |
2512f55ef8888ee7b2d25d4a6c697e2866d15949.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This example demonstrates the impact of misaligned writes on performance by
* forcing misaligned writes to occur on a float*.
*/
// Compare the host reference against the GPU result element-wise, starting
// at `offset`; prints the first mismatch and a summary line on failure.
void checkResult(float *hostRef, float *gpuRef, const int N, const int offset)
{
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = offset; i < N; i++)
    {
        // Compute |diff| manually instead of calling abs(): if the plain C
        // integer abs() is the one in scope, it truncates sub-1.0 float
        // differences to zero and silently hides real mismatches.
        float diff = hostRef[i] - gpuRef[i];
        if (diff < 0.0f) diff = -diff;

        if (diff > epsilon)
        {
            match = 0;
            printf("different on %dth element: host %f gpu %f\n", i, hostRef[i],
                   gpuRef[i]);
            break;
        }
    }

    if (!match) printf("Arrays do not match.\n\n");
}
// Fill ip[0..size) with pseudo-random floats in [0.0, 2.55]
// (rand() low byte scaled down by 100).
void initialData(float *ip, int size)
{
    for (int k = 0; k < size; ++k)
    {
        ip[k] = (float)(rand() & 0xFF) / 100.0f;
    }
}
// Host reference for the offset kernels:
// C[offset + k] = A[k] + B[k] for every k with offset + k < n.
void sumArraysOnHost(float *A, float *B, float *C, const int n, int offset)
{
    int k = 0;
    for (int idx = offset; idx < n; ++idx)
    {
        C[idx] = A[k] + B[k];
        ++k;
    }
}
// C[i + offset] = A[i] + B[i]: reads from A and B are aligned; the store to
// C is shifted by `offset` elements, forcing misaligned writes when
// offset % (warp segment) != 0. One element per thread.
__global__ void writeOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
// bounds guard: the tail block may run past n
if (k < n) C[k] = A[i] + B[i];
}
// C[i] = A[i + offset] + B[i + offset]: the store is aligned; the loads are
// shifted by `offset` elements, forcing misaligned reads. One element per
// thread.
__global__ void readOffset(float *A, float *B, float *C, const int n, int offset){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
// bounds guard: the tail block may run past n
if(k < n){
C[i] = A[k] + B[k];
}
}
// C[i + offset] = A[i + offset] + B[i + offset]: both the loads and the
// store use the shifted index, so reads and writes are equally misaligned.
__global__ void readWriteOffset(float *A, float *B, float *C, const int n, int offset){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
// bounds guard: the tail block may run past n
if(k < n){
C[k] = A[k] + B[k];
}
}
// Same body as writeOffset; launched once before the timed kernels so their
// measurements exclude one-time device start-up cost.
__global__ void warmup(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[i] + B[i];
}
// Benchmark driver: times warmup/writeOffset/readOffset/readWriteOffset for
// a given element offset (argv[1]) and block size (argv[2]).
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting reduction at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    // set up array size
    int nElem = 1 << 20; // total number of elements to reduce
    printf(" with array size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);

    // set up offset and block size (optional command-line overrides)
    int blocksize = 512;
    int offset = 0;
    if (argc > 1) offset = atoi(argv[1]);
    if (argc > 2) blocksize = atoi(argv[2]);

    // execution configuration (ceil-div so the tail is covered)
    dim3 block (blocksize, 1);
    dim3 grid ((nElem + block.x - 1) / block.x, 1);

    // allocate host memory
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nElem);
    memcpy(h_B, h_A, nBytes);

    // host-side reference result
    // NOTE(review): gpuRef is never copied back from the device and
    // checkResult is never invoked, so kernels are timed but not validated
    // here -- confirm whether validation was dropped intentionally.
    sumArraysOnHost(h_A, h_B, hostRef, nElem, offset);

    // allocate device memory
    float *d_A, *d_B, *d_C;
    CHECK(hipMalloc((float**)&d_A, nBytes));
    CHECK(hipMalloc((float**)&d_B, nBytes));
    CHECK(hipMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_B, h_A, nBytes, hipMemcpyHostToDevice));

    // warmup: absorb one-time start-up cost so later timings are clean
    double iStart = cpuSecond();
    hipLaunchKernelGGL(( warmup), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize());
    double iElaps = cpuSecond() - iStart;
    printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(hipGetLastError());

    // writeOffset
    iStart = cpuSecond();
    hipLaunchKernelGGL(( writeOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("writeOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(hipGetLastError());

    // readOffset
    iStart = cpuSecond();
    hipLaunchKernelGGL(( readOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(hipGetLastError());

    // readWriteOffset
    iStart = cpuSecond();
    hipLaunchKernelGGL(( readWriteOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("readWriteOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(hipGetLastError());

    // free host and device memory
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_B));
    CHECK(hipFree(d_C));
    free(h_A);
    free(h_B);
    free(hostRef); // previously leaked: allocated above but never released
    free(gpuRef);  // previously leaked: allocated above but never released

    // reset device
    CHECK(hipDeviceReset());
    return EXIT_SUCCESS;
}
| 2512f55ef8888ee7b2d25d4a6c697e2866d15949.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This example demonstrates the impact of misaligned writes on performance by
* forcing misaligned writes to occur on a float*.
*/
// Compare the host reference against the GPU result element-wise, starting
// at `offset`; prints the first mismatch and a summary line on failure.
void checkResult(float *hostRef, float *gpuRef, const int N, const int offset)
{
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = offset; i < N; i++)
    {
        // Compute |diff| manually instead of calling abs(): if the plain C
        // integer abs() is the one in scope, it truncates sub-1.0 float
        // differences to zero and silently hides real mismatches.
        float diff = hostRef[i] - gpuRef[i];
        if (diff < 0.0f) diff = -diff;

        if (diff > epsilon)
        {
            match = 0;
            printf("different on %dth element: host %f gpu %f\n", i, hostRef[i],
                   gpuRef[i]);
            break;
        }
    }

    if (!match) printf("Arrays do not match.\n\n");
}
// Fill ip[0..size) with pseudo-random floats in [0.0, 2.55]
// (rand() low byte scaled down by 100).
void initialData(float *ip, int size)
{
    for (int k = 0; k < size; ++k)
    {
        ip[k] = (float)(rand() & 0xFF) / 100.0f;
    }
}
// Host reference for the offset kernels:
// C[offset + k] = A[k] + B[k] for every k with offset + k < n.
void sumArraysOnHost(float *A, float *B, float *C, const int n, int offset)
{
    int k = 0;
    for (int idx = offset; idx < n; ++idx)
    {
        C[idx] = A[k] + B[k];
        ++k;
    }
}
// C[i + offset] = A[i] + B[i]: reads from A and B are aligned; the store to
// C is shifted by `offset` elements, forcing misaligned writes when
// offset % (warp segment) != 0. One element per thread.
__global__ void writeOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
// bounds guard: the tail block may run past n
if (k < n) C[k] = A[i] + B[i];
}
// C[i] = A[i + offset] + B[i + offset]: the store is aligned; the loads are
// shifted by `offset` elements, forcing misaligned reads. One element per
// thread.
__global__ void readOffset(float *A, float *B, float *C, const int n, int offset){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
// bounds guard: the tail block may run past n
if(k < n){
C[i] = A[k] + B[k];
}
}
// C[i + offset] = A[i + offset] + B[i + offset]: both the loads and the
// store use the shifted index, so reads and writes are equally misaligned.
__global__ void readWriteOffset(float *A, float *B, float *C, const int n, int offset){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
// bounds guard: the tail block may run past n
if(k < n){
C[k] = A[k] + B[k];
}
}
// Same body as writeOffset; launched once before the timed kernels so their
// measurements exclude one-time device start-up cost.
__global__ void warmup(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[k] = A[i] + B[i];
}
// Benchmark driver: times warmup/writeOffset/readOffset/readWriteOffset for
// a given element offset (argv[1]) and block size (argv[2]).
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting reduction at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // set up array size
    int nElem = 1 << 20; // total number of elements to reduce
    printf(" with array size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);

    // set up offset and block size (optional command-line overrides)
    int blocksize = 512;
    int offset = 0;
    if (argc > 1) offset = atoi(argv[1]);
    if (argc > 2) blocksize = atoi(argv[2]);

    // execution configuration (ceil-div so the tail is covered)
    dim3 block (blocksize, 1);
    dim3 grid ((nElem + block.x - 1) / block.x, 1);

    // allocate host memory
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nElem);
    memcpy(h_B, h_A, nBytes);

    // host-side reference result
    // NOTE(review): gpuRef is never copied back from the device and
    // checkResult is never invoked, so kernels are timed but not validated
    // here -- confirm whether validation was dropped intentionally.
    sumArraysOnHost(h_A, h_B, hostRef, nElem, offset);

    // allocate device memory
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_B, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_A, nBytes, cudaMemcpyHostToDevice));

    // warmup: absorb one-time start-up cost so later timings are clean
    double iStart = cpuSecond();
    warmup<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize());
    double iElaps = cpuSecond() - iStart;
    printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(cudaGetLastError());

    // writeOffset
    iStart = cpuSecond();
    writeOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("writeOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(cudaGetLastError());

    // readOffset
    iStart = cpuSecond();
    readOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(cudaGetLastError());

    // readWriteOffset
    iStart = cpuSecond();
    readWriteOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("readWriteOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
           block.x, offset, iElaps);
    CHECK(cudaGetLastError());

    // free host and device memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(h_B);
    free(hostRef); // previously leaked: allocated above but never released
    free(gpuRef);  // previously leaked: allocated above but never released

    // reset device
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
|
d4c09916eaf577d8ce006ca93f696fcecb88c020.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Mark Gates
@generated from zgemv_fermi.cu normal z -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
#define PRECISION_s
#define BLK_X 128
#define BLK_Y 128
/* Compute y = alpha*A*x + beta*y.
* Each thread block does a BLK_X x N block row of A.
* Each thread goes across one row, accumulating dot product of row ind and x into res.
* This simple implementation loads x directly, relying on the cache,
* without using shared memory.
*/
// y = alpha*A*x + beta*y, non-transposed: one thread per row.
// Expected launch: 1-D grid of ceil(m/BLK_X) blocks of BLK_X threads.
__global__ void
sgemvn_kernel1_fermi(
int m, int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx, float beta,
float * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
int ind = blockIdx.x*BLK_X + threadIdx.x;
if ( ind < m ) {
// walk across row `ind` of the column-major matrix, loading x through
// the cache (no shared-memory staging in this variant)
A += ind;
float res = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < n; j++) {
res += A[j*lda] * x[j*incx];
}
y[ind*incy] = alpha*res + beta*y[ind*incy];
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Compute y = alpha*A*x + beta*y.
* Each thread block does a BLK_X x N block row of A.
* Each thread goes across one row, accumulating dot product of row ind and x into res.
* This implementation loads BLK_Y elements into sx, then multiplies
* BLK_Y columns of A*sx.
*/
// y = alpha*A*x + beta*y, non-transposed: one thread per row, but x is
// staged through shared memory in chunks of BLK_Y elements.
// NOTE(review): sx[threadIdx.x] requires blockDim.x == BLK_Y; with the
// current BLK_X == BLK_Y == 128 that holds -- confirm if either changes.
__global__ void
sgemvn_kernel2_fermi(
int m, int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx, float beta,
float * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
int ind = blockIdx.x*BLK_X + threadIdx.x;
// threads past last row redundantly work on last row
A += min( ind, m-1 );
x += threadIdx.x*incx;
float res = MAGMA_S_ZERO;
__shared__ float sx[BLK_Y];
// full block-columns
int nfull = (n / BLK_Y) * BLK_Y;
for( int j=0; j < nfull; j += BLK_Y ) {
// load BLK_Y elements of x into sx (one element per thread)
sx[threadIdx.x] = x[0];
x += BLK_Y*incx;
__syncthreads();
// multiply A*sx
#pragma unroll
for(int j2=0; j2 < BLK_Y; j2++) {
res += A[0] * sx[j2];
A += lda;
}
// barrier before sx is overwritten by the next chunk
__syncthreads();
}
// last, partial block-column
// load remaining npart elements of x into sx; pad the rest with zeros
int npart = n % BLK_Y;
if ( threadIdx.x < npart ) {
sx[threadIdx.x] = x[0];
}
else {
sx[threadIdx.x] = MAGMA_S_ZERO;
}
__syncthreads();
// multiply A*sx
#pragma unroll
for(int j2=0; j2 < npart; j2++) {
res += A[0]*sx[j2];
A += lda;
}
// only real rows write back (out-of-range threads computed a dummy row)
if ( ind < m ) {
y[ind*incy] = alpha*res + beta*y[ind*incy];
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Compute y = alpha * A^T * x + beta*y.
* Each thread block does one column of A (i.e., one row of A^T).
* Each thread does a partial sum, then collectively they do a reduction.
*/
// y = alpha * A^T * x + beta*y: each block (indexed by blockIdx.y) handles
// one column of A; its BLK_X threads build partial dot products that are
// combined with a shared-memory tree reduction.
// NOTE(review): x is indexed as x[tx + i], i.e. incx is ignored -- this is
// only correct for incx == 1; verify against the callers.
__global__ void
sgemvt_kernel_fermi(
int m, int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx, float beta,
float * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
__shared__ float sdata[BLK_X];
float res = MAGMA_S_ZERO;
// point at element (tx, blockIdx.y) of column-major A
A += blockIdx.y*lda + threadIdx.x;
// partial sums over full BLK_X stripes, then the ragged tail
int mfull = (m / BLK_X) * BLK_X;
for(int i=0; i < mfull; i += BLK_X) {
res += A[i] * x[tx + i];
}
if ( tx + mfull < m ) {
res += A[mfull] * x[tx + mfull];
}
sdata[tx] = res;
// tree reduction of partial sums,
// from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0]
magma_sum_reduce< BLK_X >( tx, sdata );
if ( tx == 0 ) {
y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy];
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Compute y = alpha * A^H * x + beta*y.
* Same as sgemvt_kernel_fermi but conjugates entries of A.
*/
// y = alpha * A^H * x + beta*y: identical to sgemvt_kernel_fermi except
// each loaded entry of A passes through conj() (a no-op for real floats).
// NOTE(review): like the transpose kernel, x[tx + i] ignores incx -- only
// correct for incx == 1; verify against the callers.
__global__ void
sgemvc_kernel_fermi(
int m, int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx, float beta,
float * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
__shared__ float sdata[BLK_X];
float res = MAGMA_S_ZERO;
// point at element (tx, blockIdx.y) of column-major A
A += blockIdx.y*lda + threadIdx.x;
// partial sums over full BLK_X stripes, then the ragged tail
int mfull = (m / BLK_X) * BLK_X;
for(int i=0; i < mfull; i += BLK_X) {
res += conj(A[i]) * x[tx + i];
}
if ( tx + mfull < m ) {
res += conj(A[mfull]) * x[tx + mfull];
}
sdata[tx] = res;
// tree reduction of partial sums,
// from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0]
magma_sum_reduce< BLK_X >( tx, sdata );
if ( tx == 0 ) {
y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy];
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/**
Purpose
-------
SGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
dx REAL array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy REAL array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_sgemv(
magma_trans_t trans, magma_int_t m, magma_int_t n, float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dx, magma_int_t incx,
float beta,
magmaFloat_ptr dy, magma_int_t incy)
{
// LAPACK-style argument validation: info = -(position of bad argument)
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// dispatch on the device's compute capability
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemv( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
#else
magmablas_sgemv_tesla( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
// (note: only kernel1 is launched here; sgemvn_kernel2_fermi is unused)
if ( trans == MagmaNoTrans ) {
// one thread per row, BLK_X threads per block
dim3 grid( (m - 1)/BLK_X + 1 );
dim3 threads( BLK_X, 1, 1 );
hipLaunchKernelGGL(( sgemvn_kernel1_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
}
else if ( trans == MagmaTrans ) {
// one block per column (grid.y = n)
dim3 grid ( 1, n, 1 );
dim3 threads ( BLK_X, 1, 1 );
hipLaunchKernelGGL(( sgemvt_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
}
else if ( trans == MagmaConjTrans ) {
// one block per column (grid.y = n)
dim3 grid ( 1, n, 1 );
dim3 threads ( BLK_X, 1, 1 );
hipLaunchKernelGGL(( sgemvc_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
}
}
| d4c09916eaf577d8ce006ca93f696fcecb88c020.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Mark Gates
@generated from zgemv_fermi.cu normal z -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
#define PRECISION_s
#define BLK_X 128
#define BLK_Y 128
/* Compute y = alpha*A*x + beta*y.
* Each thread block does a BLK_X x N block row of A.
* Each thread goes across one row, accumulating dot product of row ind and x into res.
* This simple implementation loads x directly, relying on the cache,
* without using shared memory.
*/
// y = alpha*A*x + beta*y, non-transposed: one thread per row.
// Expected launch: 1-D grid of ceil(m/BLK_X) blocks of BLK_X threads.
__global__ void
sgemvn_kernel1_fermi(
    int m, int n, float alpha,
    const float * __restrict__ A, int lda,
    const float * __restrict__ x, int incx, float beta,
    float       * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
    int ind = blockIdx.x*BLK_X + threadIdx.x;
    if ( ind < m ) {
        // walk across row `ind` of the column-major matrix, loading x
        // through the cache (no shared-memory staging in this variant)
        A += ind;
        float res = MAGMA_S_ZERO;
        #pragma unroll
        for(int j=0; j < n; j++) {
            res += A[j*lda] * x[j*incx];
        }
        y[ind*incy] = alpha*res + beta*y[ind*incy];
    }
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Compute y = alpha*A*x + beta*y.
* Each thread block does a BLK_X x N block row of A.
* Each thread goes across one row, accumulating dot product of row ind and x into res.
* This implementation loads BLK_Y elements into sx, then multiplies
* BLK_Y columns of A*sx.
*/
// y = alpha*A*x + beta*y, non-transposed: one thread per row, but x is
// staged through shared memory in chunks of BLK_Y elements.
// NOTE(review): sx[threadIdx.x] requires blockDim.x == BLK_Y; with the
// current BLK_X == BLK_Y == 128 that holds -- confirm if either changes.
__global__ void
sgemvn_kernel2_fermi(
    int m, int n, float alpha,
    const float * __restrict__ A, int lda,
    const float * __restrict__ x, int incx, float beta,
    float       * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
    int ind = blockIdx.x*BLK_X + threadIdx.x;
    // threads past last row redundantly work on last row
    A += min( ind, m-1 );
    x += threadIdx.x*incx;
    float res = MAGMA_S_ZERO;
    __shared__ float sx[BLK_Y];
    // full block-columns
    int nfull = (n / BLK_Y) * BLK_Y;
    for( int j=0; j < nfull; j += BLK_Y ) {
        // load BLK_Y elements of x into sx (one element per thread)
        sx[threadIdx.x] = x[0];
        x += BLK_Y*incx;
        __syncthreads();
        // multiply A*sx
        #pragma unroll
        for(int j2=0; j2 < BLK_Y; j2++) {
            res += A[0] * sx[j2];
            A += lda;
        }
        // barrier before sx is overwritten by the next chunk
        __syncthreads();
    }
    // last, partial block-column
    // load remaining npart elements of x into sx; pad the rest with zeros
    int npart = n % BLK_Y;
    if ( threadIdx.x < npart ) {
        sx[threadIdx.x] = x[0];
    }
    else {
        sx[threadIdx.x] = MAGMA_S_ZERO;
    }
    __syncthreads();
    // multiply A*sx
    #pragma unroll
    for(int j2=0; j2 < npart; j2++) {
        res += A[0]*sx[j2];
        A += lda;
    }
    // only real rows write back (out-of-range threads computed a dummy row)
    if ( ind < m ) {
        y[ind*incy] = alpha*res + beta*y[ind*incy];
    }
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Compute y = alpha * A^T * x + beta*y.
* Each thread block does one column of A (i.e., one row of A^T).
* Each thread does a partial sum, then collectively they do a reduction.
*/
// y = alpha * A^T * x + beta*y: each block (indexed by blockIdx.y) handles
// one column of A; its BLK_X threads build partial dot products that are
// combined with a shared-memory tree reduction.
// NOTE(review): x is indexed as x[tx + i], i.e. incx is ignored -- this is
// only correct for incx == 1; verify against the callers.
__global__ void
sgemvt_kernel_fermi(
    int m, int n, float alpha,
    const float * __restrict__ A, int lda,
    const float * __restrict__ x, int incx, float beta,
    float       * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
    int tx = threadIdx.x;
    __shared__ float sdata[BLK_X];
    float res = MAGMA_S_ZERO;
    // point at element (tx, blockIdx.y) of column-major A
    A += blockIdx.y*lda + threadIdx.x;
    // partial sums over full BLK_X stripes, then the ragged tail
    int mfull = (m / BLK_X) * BLK_X;
    for(int i=0; i < mfull; i += BLK_X) {
        res += A[i] * x[tx + i];
    }
    if ( tx + mfull < m ) {
        res += A[mfull] * x[tx + mfull];
    }
    sdata[tx] = res;
    // tree reduction of partial sums,
    // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0]
    magma_sum_reduce< BLK_X >( tx, sdata );
    if ( tx == 0 ) {
        y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy];
    }
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Compute y = alpha * A^H * x + beta*y.
* Same as sgemvt_kernel_fermi but conjugates entries of A.
*/
// y = alpha * A^H * x + beta*y: identical to sgemvt_kernel_fermi except
// each loaded entry of A passes through conj() (a no-op for real floats).
// NOTE(review): like the transpose kernel, x[tx + i] ignores incx -- only
// correct for incx == 1; verify against the callers.
__global__ void
sgemvc_kernel_fermi(
    int m, int n, float alpha,
    const float * __restrict__ A, int lda,
    const float * __restrict__ x, int incx, float beta,
    float       * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
    int tx = threadIdx.x;
    __shared__ float sdata[BLK_X];
    float res = MAGMA_S_ZERO;
    // point at element (tx, blockIdx.y) of column-major A
    A += blockIdx.y*lda + threadIdx.x;
    // partial sums over full BLK_X stripes, then the ragged tail
    int mfull = (m / BLK_X) * BLK_X;
    for(int i=0; i < mfull; i += BLK_X) {
        res += conj(A[i]) * x[tx + i];
    }
    if ( tx + mfull < m ) {
        res += conj(A[mfull]) * x[tx + mfull];
    }
    sdata[tx] = res;
    // tree reduction of partial sums,
    // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0]
    magma_sum_reduce< BLK_X >( tx, sdata );
    if ( tx == 0 ) {
        y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy];
    }
#endif /* (__CUDA_ARCH__ >= 200) */
}
/**
Purpose
-------
SGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
dx REAL array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy REAL array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_sgemv(
    magma_trans_t trans, magma_int_t m, magma_int_t n, float alpha,
    magmaFloat_const_ptr dA, magma_int_t ldda,
    magmaFloat_const_ptr dx, magma_int_t incx,
    float beta,
    magmaFloat_ptr dy, magma_int_t incy)
{
    // LAPACK-style argument validation: info = -(position of bad argument)
    magma_int_t info = 0;
    if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
        info = -1;
    else if ( m < 0 )
        info = -2;
    else if ( n < 0 )
        info = -3;
    else if ( ldda < m )
        info = -6;
    else if ( incx == 0 )
        info = -8;
    else if ( incy == 0 )
        info = -11;
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info;
    }
    // dispatch on the device's compute capability
    magma_int_t arch = magma_getdevice_arch();
    if ( arch < 200 ) {
        // --------------------
        // call CUDA ARCH 1.x version
        // magmablas for [sd] precisions, cublas for [zc] precisions.
        #if defined(PRECISION_z) || defined(PRECISION_c)
        magma_sgemv( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
        #else
        magmablas_sgemv_tesla( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
        #endif
        return;
    }
    // --------------------
    // CUDA ARCH 2.x (Fermi) version
    // (note: only kernel1 is launched here; sgemvn_kernel2_fermi is unused)
    if ( trans == MagmaNoTrans ) {
        // one thread per row, BLK_X threads per block
        dim3 grid( (m - 1)/BLK_X + 1 );
        dim3 threads( BLK_X, 1, 1 );
        sgemvn_kernel1_fermi<<< grid, threads, 0, magma_stream >>>
            ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
    }
    else if ( trans == MagmaTrans ) {
        // one block per column (grid.y = n)
        dim3 grid    ( 1, n, 1 );
        dim3 threads ( BLK_X, 1, 1 );
        sgemvt_kernel_fermi<<< grid, threads, 0, magma_stream >>>
            ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
    }
    else if ( trans == MagmaConjTrans ) {
        // one block per column (grid.y = n)
        dim3 grid    ( 1, n, 1 );
        dim3 threads ( BLK_X, 1, 1 );
        sgemvc_kernel_fermi<<< grid, threads, 0, magma_stream >>>
            ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
    }
}
|
d8ccfba6350237828bcc114e35bd815c57c44c9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgeellrtmv.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almería University
// ELLRT sparse matrix-vector product, y = alpha*A*x + beta*y, with T = 32
// threads cooperating on each row. Requires dynamic shared memory of
// blockDim.x * sizeof(magmaFloatComplex).
// NOTE(review): the in-shared-memory reduction has no __syncthreads()/
// volatile and relies on implicit warp-synchronous execution; that is not
// safe on architectures with independent thread scheduling (Volta+) --
// confirm the target architectures before reuse.
__global__ void
cgeellrtmv_kernel_32( int num_rows,
                 int num_cols,
                 magmaFloatComplex alpha,
                 magmaFloatComplex *d_val,
                 magma_index_t *d_colind,
                 magma_index_t *d_rowlength,
                 magmaFloatComplex *d_x,
                 magmaFloatComplex beta,
                 magmaFloatComplex *d_y,
                 int T,
                 int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
// ceil(rowlength/T): number of elements each thread handles
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
// pairwise reduction of the T=32 partial sums, then scaled write-back
if( idp < 16 ){
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University
// ELLRT SpMV y = alpha*A*x + beta*y with T = 16 threads per row; identical
// structure to the T = 32 variant but with one fewer reduction step, so two
// rows share each warp.
// NOTE(review): the wrapper passes threads-per-row in `T` and the padded
// row length in `alignment`; the un-synchronized shared-memory reduction
// relies on implicit warp-synchronous execution (pre-Volta assumption).
__global__ void
cgeellrtmv_kernel_16( int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
// tree reduction over the 16 partials of this row
if( idp < 8 ){
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University
// ELLRT SpMV y = alpha*A*x + beta*y with T = 8 threads per row; identical
// structure to the T = 32 variant but with two fewer reduction steps, so
// four rows share each warp.
// NOTE(review): the wrapper passes threads-per-row in `T` and the padded
// row length in `alignment`; the un-synchronized shared-memory reduction
// relies on implicit warp-synchronous execution (pre-Volta assumption).
__global__ void
cgeellrtmv_kernel_8( int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
// tree reduction over the 8 partials of this row
if( idp < 4 ){
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows
@param
n magma_int_t
number of columns
@param
nnz_per_row magma_int_t
max number of nonzeros in a row
@param
alpha magmaFloatComplex
scalar alpha
@param
d_val magmaFloatComplex*
val array
@param
d_colind magma_int_t*
col indices
@param
d_rowlength magma_int_t*
number of elements in each row
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar beta
@param
d_y magmaFloatComplex*
output vector y
@param
blocksize magma_int_t
threads per block
@param
alignment magma_int_t
threads assigned to each row
@ingroup magmasparse_cblas
********************************************************************/
/**
 * Host dispatcher for the ELLRT SpMV kernels: y = alpha * A * x + beta * y
 * with `alignment` threads cooperating per row (only 8, 16 and 32 are
 * supported). See the Doxygen block above for the parameter contract.
 * Fixes: printf "%d" received a magma_int_t (UB when it is 64-bit), and
 * m == 0 previously caused a division by zero when computing the grid.
 */
extern "C" magma_int_t
magma_cgeellrtmv( magma_trans_t transA,
                  magma_int_t m, magma_int_t n,
                  magma_int_t nnz_per_row,
                  magmaFloatComplex alpha,
                  magmaFloatComplex *d_val,
                  magma_index_t *d_colind,
                  magma_index_t *d_rowlength,
                  magmaFloatComplex *d_x,
                  magmaFloatComplex beta,
                  magmaFloatComplex *d_y,
                  magma_int_t alignment,
                  magma_int_t blocksize ){

    int num_blocks = ( (m+blocksize-1)/blocksize);
    int num_threads = alignment*blocksize;
    // row length padded up to the next multiple of `alignment`
    int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment)
                            *alignment;

    magma_int_t arch = magma_getdevice_arch();
    if ( arch < 200 && num_threads > 256 )
        printf("error: too much shared memory requested.\n");

    // lay the blocks out on a near-square 2D grid to stay below the
    // per-dimension grid limit; guard against num_blocks == 0 (m == 0)
    // which would otherwise divide by zero below
    int dimgrid1 = (int) sqrt( (double) num_blocks );
    if ( dimgrid1 < 1 )
        dimgrid1 = 1;
    int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
    dim3 grid( dimgrid1, dimgrid2, 1);

    // one shared-memory slot per thread for the per-row reduction
    int Ms = alignment * blocksize * sizeof( magmaFloatComplex );

    // NOTE: the kernels' `T` parameter receives `alignment` (threads per
    // row) and their `alignment` parameter receives the padded row length.
    if( alignment == 32 ){
        hipLaunchKernelGGL(( cgeellrtmv_kernel_32), dim3(grid), dim3(num_threads) , Ms, magma_stream ,
        m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
        alignment, real_row_length );
    }
    else if( alignment == 16 ){
        hipLaunchKernelGGL(( cgeellrtmv_kernel_16), dim3(grid), dim3(num_threads) , Ms, magma_stream ,
        m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
        alignment, real_row_length );
    }
    else if( alignment == 8 ){
        hipLaunchKernelGGL(( cgeellrtmv_kernel_8), dim3(grid), dim3(num_threads) , Ms, magma_stream ,
        m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
        alignment, real_row_length );
    }
    else{
        // cast: magma_int_t may be 64-bit; passing it to "%d" is undefined
        printf("error: alignment %d not supported.\n", (int) alignment);
        exit(-1);
    }

    return MAGMA_SUCCESS;
}
| d8ccfba6350237828bcc114e35bd815c57c44c9f.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgeellrtmv.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
// ELLRT sparse matrix-vector product y = alpha*A*x + beta*y with T = 32
// threads cooperating on each row: every thread accumulates a strided
// partial dot product, the partials are combined by a tree reduction in
// dynamic shared memory (one slot per thread, sized by the launcher), and
// lane 0 of each row writes the final value.
// NOTE(review): despite the parameter names, the host wrapper passes the
// threads-per-row count in `T` and the padded row length in `alignment`.
// NOTE(review): there is no __syncthreads/__syncwarp between the shared
// write (shared[idb] = dot) and the reduction reads; with T == 32 this
// relies on implicit warp-synchronous execution (pre-Volta assumption) --
// confirm before running on GPUs with independent thread scheduling.
__global__ void
cgeellrtmv_kernel_32( int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
// tree reduction over the 32 partials of this row
if( idp < 16 ){
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
// ELLRT SpMV y = alpha*A*x + beta*y with T = 16 threads per row; identical
// structure to the T = 32 variant but with one fewer reduction step, so two
// rows share each warp.
// NOTE(review): the wrapper passes threads-per-row in `T` and the padded
// row length in `alignment`; the un-synchronized shared-memory reduction
// relies on implicit warp-synchronous execution (pre-Volta assumption).
__global__ void
cgeellrtmv_kernel_16( int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
// tree reduction over the 16 partials of this row
if( idp < 8 ){
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
// ELLRT SpMV y = alpha*A*x + beta*y with T = 8 threads per row; identical
// structure to the T = 32 variant but with two fewer reduction steps, so
// four rows share each warp.
// NOTE(review): the wrapper passes threads-per-row in `T` and the padded
// row length in `alignment`; the un-synchronized shared-memory reduction
// relies on implicit warp-synchronous execution (pre-Volta assumption).
__global__ void
cgeellrtmv_kernel_8( int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
// tree reduction over the 8 partials of this row
if( idp < 4 ){
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows
@param
n magma_int_t
number of columns
@param
nnz_per_row magma_int_t
max number of nonzeros in a row
@param
alpha magmaFloatComplex
scalar alpha
@param
d_val magmaFloatComplex*
val array
@param
d_colind magma_int_t*
col indices
@param
d_rowlength magma_int_t*
number of elements in each row
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar beta
@param
d_y magmaFloatComplex*
output vector y
@param
blocksize magma_int_t
threads per block
@param
alignment magma_int_t
threads assigned to each row
@ingroup magmasparse_cblas
********************************************************************/
/**
 * Host dispatcher for the ELLRT SpMV kernels: y = alpha * A * x + beta * y
 * with `alignment` threads cooperating per row (only 8, 16 and 32 are
 * supported). See the Doxygen block above for the parameter contract.
 * Fixes: printf "%d" received a magma_int_t (UB when it is 64-bit), and
 * m == 0 previously caused a division by zero when computing the grid.
 */
extern "C" magma_int_t
magma_cgeellrtmv( magma_trans_t transA,
                  magma_int_t m, magma_int_t n,
                  magma_int_t nnz_per_row,
                  magmaFloatComplex alpha,
                  magmaFloatComplex *d_val,
                  magma_index_t *d_colind,
                  magma_index_t *d_rowlength,
                  magmaFloatComplex *d_x,
                  magmaFloatComplex beta,
                  magmaFloatComplex *d_y,
                  magma_int_t alignment,
                  magma_int_t blocksize ){

    int num_blocks = ( (m+blocksize-1)/blocksize);
    int num_threads = alignment*blocksize;
    // row length padded up to the next multiple of `alignment`
    int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment)
                            *alignment;

    magma_int_t arch = magma_getdevice_arch();
    if ( arch < 200 && num_threads > 256 )
        printf("error: too much shared memory requested.\n");

    // lay the blocks out on a near-square 2D grid to stay below the
    // per-dimension grid limit; guard against num_blocks == 0 (m == 0)
    // which would otherwise divide by zero below
    int dimgrid1 = (int) sqrt( (double) num_blocks );
    if ( dimgrid1 < 1 )
        dimgrid1 = 1;
    int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
    dim3 grid( dimgrid1, dimgrid2, 1);

    // one shared-memory slot per thread for the per-row reduction
    int Ms = alignment * blocksize * sizeof( magmaFloatComplex );

    // NOTE: the kernels' `T` parameter receives `alignment` (threads per
    // row) and their `alignment` parameter receives the padded row length.
    if( alignment == 32 ){
        cgeellrtmv_kernel_32<<< grid, num_threads , Ms, magma_stream >>>
        ( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
        alignment, real_row_length );
    }
    else if( alignment == 16 ){
        cgeellrtmv_kernel_16<<< grid, num_threads , Ms, magma_stream >>>
        ( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
        alignment, real_row_length );
    }
    else if( alignment == 8 ){
        cgeellrtmv_kernel_8<<< grid, num_threads , Ms, magma_stream >>>
        ( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
        alignment, real_row_length );
    }
    else{
        // cast: magma_int_t may be 64-bit; passing it to "%d" is undefined
        printf("error: alignment %d not supported.\n", (int) alignment);
        exit(-1);
    }

    return MAGMA_SUCCESS;
}
|
70a6241071f681c732aefe3f6b33d290de98426d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file
* @brief Definition of class CudaZeroCrossFilter.
*
* @author Jan Bobek
* @since 12th April 2015
*/
#include "edetect.hxx"
#include "IImage.hxx"
#include "cuda/CudaError.hxx"
#include "cuda/CudaZeroCrossFilter.hxx"
/**
* @brief CUDA kernel for detection
* of zero-crossings.
*
* @param[out] ddata
* The destination image data.
* @param[in] dstride
* Size of the row stride in destination data.
* @param[in] sdata
* The source image data.
* @param[in] sstride
* Size of the row stride in source data.
* @param[in] rows
* Number of rows in the image.
* @param[in] cols
* Number of columns in the image.
*/
// One thread per pixel: dst(row,col) = 1.0f when any opposing pair of
// neighbours (left/right, up/down, or either diagonal) changes sign in the
// float-valued source image, 0.0f otherwise. Border pixels are always 0.
// Both images are addressed through byte strides.
__global__ void
detectZeroCrossKernel(
unsigned char* ddata,
unsigned int dstride,
const unsigned char* sdata,
unsigned int sstride,
unsigned int rows,
unsigned int cols
)
{
const unsigned int col =
blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int row =
blockIdx.y * blockDim.y + threadIdx.y;
if( !(row < rows && col < cols) )
return;
float* const dstp =
(float*)(ddata + row * dstride) + col;
const unsigned char* const srcp =
sdata + row * sstride + col * sizeof(float);
// tp/mp/bp point at the rows above/at/below the current pixel; they are
// only dereferenced when the interior test below succeeds (the && chain
// short-circuits before the neighbour products for border pixels).
const float* const tp =
(const float*)(srcp - sstride);
const float* const mp =
(const float*)srcp;
const float* const bp =
(const float*)(srcp + sstride);
*dstp = (1 <= row &&
1 <= col &&
row < (rows - 1) &&
col < (cols - 1) &&
(0 > mp[-1] * mp[ 1] ||
0 > tp[ 0] * bp[ 0] ||
0 > tp[-1] * bp[ 1] ||
0 > tp[ 1] * bp[-1])
? 1.0f : 0.0f);
}
/*************************************************************************/
/* CudaZeroCrossFilter */
/*************************************************************************/
/// Launches the zero-crossing detection kernel over the whole image,
/// then synchronizes and reports launch or execution failures.
void
CudaZeroCrossFilter::detectZeroCross(
    IImage& dest,
    const IImage& src
    )
{
    // One 32x8 block = 256 threads (32 = warp size).
    const dim3 block(32, 8);
    const unsigned int gridX = (src.columns() + block.x - 1) / block.x;
    const unsigned int gridY = (src.rows() + block.y - 1) / block.y;
    const dim3 grid(gridX, gridY);

    hipLaunchKernelGGL(( detectZeroCrossKernel), dim3(grid), dim3(block) , 0, 0,
        dest.data(), dest.stride(),
        src.data(), src.stride(),
        src.rows(), src.columns()
        );

    cudaCheckLastError( "CudaZeroCrossFilter: zero-cross kernel launch failed" );
    cudaMsgCheckError( hipDeviceSynchronize(), "CudaZeroCrossFilter: zero-cross kernel run failed" );
}
| 70a6241071f681c732aefe3f6b33d290de98426d.cu | /** @file
* @brief Definition of class CudaZeroCrossFilter.
*
* @author Jan Bobek
* @since 12th April 2015
*/
#include "edetect.hxx"
#include "IImage.hxx"
#include "cuda/CudaError.hxx"
#include "cuda/CudaZeroCrossFilter.hxx"
/**
* @brief CUDA kernel for detection
* of zero-crossings.
*
* @param[out] ddata
* The destination image data.
* @param[in] dstride
* Size of the row stride in destination data.
* @param[in] sdata
* The source image data.
* @param[in] sstride
* Size of the row stride in source data.
* @param[in] rows
* Number of rows in the image.
* @param[in] cols
* Number of columns in the image.
*/
// One thread per pixel: dst(row,col) = 1.0f when any opposing pair of
// neighbours (left/right, up/down, or either diagonal) changes sign in the
// float-valued source image, 0.0f otherwise. Border pixels are always 0.
// Both images are addressed through byte strides.
__global__ void
detectZeroCrossKernel(
unsigned char* ddata,
unsigned int dstride,
const unsigned char* sdata,
unsigned int sstride,
unsigned int rows,
unsigned int cols
)
{
const unsigned int col =
blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int row =
blockIdx.y * blockDim.y + threadIdx.y;
if( !(row < rows && col < cols) )
return;
float* const dstp =
(float*)(ddata + row * dstride) + col;
const unsigned char* const srcp =
sdata + row * sstride + col * sizeof(float);
// tp/mp/bp point at the rows above/at/below the current pixel; they are
// only dereferenced when the interior test below succeeds (the && chain
// short-circuits before the neighbour products for border pixels).
const float* const tp =
(const float*)(srcp - sstride);
const float* const mp =
(const float*)srcp;
const float* const bp =
(const float*)(srcp + sstride);
*dstp = (1 <= row &&
1 <= col &&
row < (rows - 1) &&
col < (cols - 1) &&
(0 > mp[-1] * mp[ 1] ||
0 > tp[ 0] * bp[ 0] ||
0 > tp[-1] * bp[ 1] ||
0 > tp[ 1] * bp[-1])
? 1.0f : 0.0f);
}
/*************************************************************************/
/* CudaZeroCrossFilter */
/*************************************************************************/
/// Launches the zero-crossing detection kernel over the whole image,
/// then synchronizes and reports launch or execution failures.
void
CudaZeroCrossFilter::detectZeroCross(
    IImage& dest,
    const IImage& src
    )
{
    // One 32x8 block = 256 threads (32 = warp size).
    const dim3 block(32, 8);
    const unsigned int gridX = (src.columns() + block.x - 1) / block.x;
    const unsigned int gridY = (src.rows() + block.y - 1) / block.y;
    const dim3 grid(gridX, gridY);

    detectZeroCrossKernel<<< grid, block >>>(
        dest.data(), dest.stride(),
        src.data(), src.stride(),
        src.rows(), src.columns()
        );

    cudaCheckLastError( "CudaZeroCrossFilter: zero-cross kernel launch failed" );
    cudaMsgCheckError( cudaDeviceSynchronize(), "CudaZeroCrossFilter: zero-cross kernel run failed" );
}
|
e1a98837f7b9c39b884351eb3802c5340f58d941.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// Writes sizeof(int) into one element of `a` per thread; the launch must
// supply exactly one thread per array element (no bounds check).
__global__ void kernel(int* a) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    a[gid] = sizeof(int);
}
// Fills a 1024-element device array with sizeof(int) and prints the first
// 30 values. Every allocation, launch and copy is now checked so a failing
// HIP call is reported instead of silently printing garbage from the
// uninitialized host buffer.
int main() {
    const int N = 1024;
    const int size = N * sizeof(int);
    int* a = (int*)malloc(size);
    if (a == nullptr) {
        std::cout << "host allocation failed" << std::endl;
        return 1;
    }
    int* da = nullptr;
    hipError_t err = hipMalloc(&da, size);
    if (err != hipSuccess) {
        std::cout << "hipMalloc failed: " << hipGetErrorString(err) << std::endl;
        free(a);
        return 1;
    }
    kernel << <N / 4, 4 >> > (da);
    err = hipGetLastError();            // catches a bad launch configuration
    if (err == hipSuccess)
        err = hipMemcpy(a, da, size, hipMemcpyDeviceToHost); // blocking: also surfaces kernel errors
    if (err != hipSuccess) {
        std::cout << "kernel/copy failed: " << hipGetErrorString(err) << std::endl;
    } else {
        for (int i = 0; i < 30; ++i)
            std::cout << a[i] << ' ';
        std::cout << std::endl;
    }
    hipFree(da);
    free(a);
    return 0;
} | e1a98837f7b9c39b884351eb3802c5340f58d941.cu | #include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Writes sizeof(int) into one element of `a` per thread; the launch must
// supply exactly one thread per array element (no bounds check).
__global__ void kernel(int* a) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    a[gid] = sizeof(int);
}
// Fills a 1024-element device array with sizeof(int) and prints the first
// 30 values. Every allocation, launch and copy is now checked so a failing
// CUDA call is reported instead of silently printing garbage from the
// uninitialized host buffer.
int main() {
    const int N = 1024;
    const int size = N * sizeof(int);
    int* a = (int*)malloc(size);
    if (a == nullptr) {
        std::cout << "host allocation failed" << std::endl;
        return 1;
    }
    int* da = nullptr;
    cudaError_t err = cudaMalloc(&da, size);
    if (err != cudaSuccess) {
        std::cout << "cudaMalloc failed: " << cudaGetErrorString(err) << std::endl;
        free(a);
        return 1;
    }
    kernel << <N / 4, 4 >> > (da);
    err = cudaGetLastError();           // catches a bad launch configuration
    if (err == cudaSuccess)
        err = cudaMemcpy(a, da, size, cudaMemcpyDeviceToHost); // blocking: also surfaces kernel errors
    if (err != cudaSuccess) {
        std::cout << "kernel/copy failed: " << cudaGetErrorString(err) << std::endl;
    } else {
        for (int i = 0; i < 30; ++i)
            std::cout << a[i] << ' ';
        std::cout << std::endl;
    }
    cudaFree(da);
    free(a);
    return 0;
} |
ab7aafb7405f49d37b53ab62507b0d37c5473e0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv (oleg.semeniv@gmail.com)
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Element-wise AdaDelta update over the gradient, via a grid-stride loop:
//   msg'  = rho*msg  + (1-rho)*grad^2
//   up    = grad * sqrt(msdx + eps) / sqrt(msg' + eps)
//   msdx' = rho*msdx + (1-rho)*up^2
// Thread 0 of each block precomputes (in shared memory) whether all six
// arrays share element-wise stride 1, ordering and shape; if so the linear
// index i is used directly for every array, otherwise per-array offsets
// are derived from i's coordinates in x's shape.
template<typename T>
__global__ void adaDeltaUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinMsg, const Nd4jLong* inMsgShapeInfo,
const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vstMsg,
const Nd4jLong* stMsgShapeInfo, void* vstMsdx, const Nd4jLong* stMsdxShapeInfo, const T rho, const T epsilon) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto initMsg= reinterpret_cast<const T*>(vinMsg);
const auto initMsdx = reinterpret_cast<const T*>(vinMsdx);
auto up = reinterpret_cast<T*>(vz);
auto stMsg = reinterpret_cast<T*>(vstMsg);
auto stMsdx = reinterpret_cast<T*>(vstMsdx);
__shared__ Nd4jLong xLen;
__shared__ T rhoT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInMsgSame, bXStMsgSame, bXInMsdxSame, bXStMsdxSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
rhoT = (1 - rho);
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stMsgShapeInfo) && 1 == shape::elementWiseStride(inMsgShapeInfo) &&
1 == shape::elementWiseStride(stMsdxShapeInfo) && 1 == shape::elementWiseStride(inMsdxShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(zShapeInfo) == shape::order(stMsgShapeInfo) &&
shape::order(stMsgShapeInfo) == shape::order(inMsgShapeInfo) && shape::order(inMsgShapeInfo) == shape::order(stMsdxShapeInfo) &&
shape::order(stMsdxShapeInfo) == shape::order(inMsdxShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsgShapeInfo);
bXStMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsgShapeInfo);
bXInMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsdxShapeInfo);
bXStMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsdxShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
auto xOffset = i, zOffset = i, initMsgOffset = i, initMsdxOffset = i, stMsgOffset = i, stMsdxOffset = i;
if (!bEWS || !bOrdering){
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initMsgOffset = bXInMsgSame ? xOffset : shape::getOffset(inMsgShapeInfo, coords);
stMsgOffset = bXStMsgSame ? xOffset : shape::getOffset(stMsgShapeInfo, coords);
initMsdxOffset = bXInMsdxSame ? xOffset : shape::getOffset(inMsdxShapeInfo, coords);
stMsdxOffset = bXStMsdxSame ? xOffset : shape::getOffset(stMsdxShapeInfo, coords);
}
stMsg[stMsgOffset] = rho * initMsg[initMsgOffset] + grad[xOffset] * grad[xOffset] * rhoT;
up[zOffset] = grad[xOffset] * (sd::math::nd4j_sqrt<T, T>(initMsdx[initMsdxOffset] + epsilon) / sd::math::nd4j_sqrt<T, T>(stMsg[stMsgOffset] + epsilon));
stMsdx[stMsdxOffset] = rho * initMsdx[initMsdxOffset] + up[zOffset] * up[zOffset] * rhoT;
}
}
///////////////////////////////////////////////////////////////////
// Host-side launcher: narrows the double scalars to T and launches
// adaDeltaUpdaterCuda<T> on the given stream.
// NOTE(review): 256 bytes of dynamic shared memory are requested although
// the kernel only declares static __shared__ variables -- looks
// unnecessary; confirm before removing.
template<typename T>
linkage void adaDeltaUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo,
const void* vinMsg, const Nd4jLong* inMsgShapeInfo, const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vstMsg, const Nd4jLong* stMsgShapeInfo,
void* vstMsdx, const Nd4jLong* stMsdxShapeInfo, const double dRho, const double dEpsilon) {
const T rho = static_cast<T>(dRho);
const T epsilon = static_cast<T>(dEpsilon);
adaDeltaUpdaterCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, vinMsg, inMsgShapeInfo,
vinMsdx, inMsdxShapeInfo, vz, zShapeInfo, vstMsg, stMsgShapeInfo, vstMsdx, stMsdxShapeInfo, rho, epsilon);
}
///////////////////////////////////////////////////////////////////
// Public entry point for the AdaDelta updater: marks the NDArray buffers
// for device use, dispatches adaDeltaUpdaterCudaLauncher on the gradient's
// floating-point type (one thread per element, capped block size), then
// registers the buffers and synchronizes the stream.
void updaterAdaDelta(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateMsg, const NDArray& initStateMsdx,
NDArray& update, NDArray& stateMsg, NDArray& stateMsdx, const double dRho, const double dEpsilon) {
PointersManager manager(context, "adaDeltaUpdater");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });
BUILD_SINGLE_SELECTOR(gradient.dataType(), adaDeltaUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.getSpecialBuffer(), gradient.getSpecialShapeInfo(),
initStateMsg.getSpecialBuffer(), initStateMsg.getSpecialShapeInfo(), initStateMsdx.getSpecialBuffer(), initStateMsdx.getSpecialShapeInfo(),
update.getSpecialBuffer(), update.getSpecialShapeInfo(),stateMsg.getSpecialBuffer(), stateMsg.getSpecialShapeInfo(),
stateMsdx.getSpecialBuffer(), stateMsdx.getSpecialShapeInfo(), dRho, dEpsilon), FLOAT_TYPES);
NDArray::registerSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });
manager.synchronize();
}
}
}
}
| ab7aafb7405f49d37b53ab62507b0d37c5473e0c.cu | /*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv (oleg.semeniv@gmail.com)
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void adaDeltaUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinMsg, const Nd4jLong* inMsgShapeInfo,
const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vstMsg,
const Nd4jLong* stMsgShapeInfo, void* vstMsdx, const Nd4jLong* stMsdxShapeInfo, const T rho, const T epsilon) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto initMsg= reinterpret_cast<const T*>(vinMsg);
const auto initMsdx = reinterpret_cast<const T*>(vinMsdx);
auto up = reinterpret_cast<T*>(vz);
auto stMsg = reinterpret_cast<T*>(vstMsg);
auto stMsdx = reinterpret_cast<T*>(vstMsdx);
__shared__ Nd4jLong xLen;
__shared__ T rhoT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInMsgSame, bXStMsgSame, bXInMsdxSame, bXStMsdxSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
rhoT = (1 - rho);
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stMsgShapeInfo) && 1 == shape::elementWiseStride(inMsgShapeInfo) &&
1 == shape::elementWiseStride(stMsdxShapeInfo) && 1 == shape::elementWiseStride(inMsdxShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(zShapeInfo) == shape::order(stMsgShapeInfo) &&
shape::order(stMsgShapeInfo) == shape::order(inMsgShapeInfo) && shape::order(inMsgShapeInfo) == shape::order(stMsdxShapeInfo) &&
shape::order(stMsdxShapeInfo) == shape::order(inMsdxShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsgShapeInfo);
bXStMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsgShapeInfo);
bXInMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsdxShapeInfo);
bXStMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsdxShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
auto xOffset = i, zOffset = i, initMsgOffset = i, initMsdxOffset = i, stMsgOffset = i, stMsdxOffset = i;
if (!bEWS || !bOrdering){
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initMsgOffset = bXInMsgSame ? xOffset : shape::getOffset(inMsgShapeInfo, coords);
stMsgOffset = bXStMsgSame ? xOffset : shape::getOffset(stMsgShapeInfo, coords);
initMsdxOffset = bXInMsdxSame ? xOffset : shape::getOffset(inMsdxShapeInfo, coords);
stMsdxOffset = bXStMsdxSame ? xOffset : shape::getOffset(stMsdxShapeInfo, coords);
}
stMsg[stMsgOffset] = rho * initMsg[initMsgOffset] + grad[xOffset] * grad[xOffset] * rhoT;
up[zOffset] = grad[xOffset] * (sd::math::nd4j_sqrt<T, T>(initMsdx[initMsdxOffset] + epsilon) / sd::math::nd4j_sqrt<T, T>(stMsg[stMsgOffset] + epsilon));
stMsdx[stMsdxOffset] = rho * initMsdx[initMsdxOffset] + up[zOffset] * up[zOffset] * rhoT;
}
}
///////////////////////////////////////////////////////////////////
// Host-side launcher: narrows the double scalars to T and launches
// adaDeltaUpdaterCuda<T> on the given stream.
// NOTE(review): 256 bytes of dynamic shared memory are requested although
// the kernel only declares static __shared__ variables -- looks
// unnecessary; confirm before removing.
template<typename T>
linkage void adaDeltaUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo,
const void* vinMsg, const Nd4jLong* inMsgShapeInfo, const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vstMsg, const Nd4jLong* stMsgShapeInfo,
void* vstMsdx, const Nd4jLong* stMsdxShapeInfo, const double dRho, const double dEpsilon) {
const T rho = static_cast<T>(dRho);
const T epsilon = static_cast<T>(dEpsilon);
adaDeltaUpdaterCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, vinMsg, inMsgShapeInfo,
vinMsdx, inMsdxShapeInfo, vz, zShapeInfo, vstMsg, stMsgShapeInfo, vstMsdx, stMsdxShapeInfo, rho, epsilon);
}
///////////////////////////////////////////////////////////////////
// Public entry point for the AdaDelta updater: marks the NDArray buffers
// for device use, dispatches adaDeltaUpdaterCudaLauncher on the gradient's
// floating-point type (one thread per element, capped block size), then
// registers the buffers and synchronizes the stream.
void updaterAdaDelta(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateMsg, const NDArray& initStateMsdx,
NDArray& update, NDArray& stateMsg, NDArray& stateMsdx, const double dRho, const double dEpsilon) {
PointersManager manager(context, "adaDeltaUpdater");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });
BUILD_SINGLE_SELECTOR(gradient.dataType(), adaDeltaUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.getSpecialBuffer(), gradient.getSpecialShapeInfo(),
initStateMsg.getSpecialBuffer(), initStateMsg.getSpecialShapeInfo(), initStateMsdx.getSpecialBuffer(), initStateMsdx.getSpecialShapeInfo(),
update.getSpecialBuffer(), update.getSpecialShapeInfo(),stateMsg.getSpecialBuffer(), stateMsg.getSpecialShapeInfo(),
stateMsdx.getSpecialBuffer(), stateMsdx.getSpecialShapeInfo(), dRho, dEpsilon), FLOAT_TYPES);
NDArray::registerSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });
manager.synchronize();
}
}
}
}
|
164e76b19820347557069fb0ffc6b6f7fd6a5267.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
// CHECK: #include "hip/hip_complex.h"
#include "hip/hip_complex.h"
#define TYPEFLOAT
#define DIMX 100
#define DIMY 40
#define moveX 2
#define moveY 1
#define MAXITERATIONS 10
#ifdef TYPEFLOAT
#define TYPE float
// CHECK: #define cTYPE hipFloatComplex
#define cTYPE cuFloatComplex
// CHECK: #define cMakecuComplex(re,i) make_hipFloatComplex(re,i)
#define cMakecuComplex(re,i) make_cuFloatComplex(re,i)
#endif
#ifdef TYPEDOUBLE
// CHECK: #define TYPE hipDoubleComplex
#define TYPE hipDoubleComplex
// CHECK: #define cMakecuComplex(re,i) make_hipDoubleComplex(re,i)
#define cMakecuComplex(re,i) make_cuDoubleComplex(re,i)
#endif
// One Julia-set iteration step: returns p*p + c in single-precision
// complex arithmetic. (The CHECK line below is a hipify test directive.)
__device__ cTYPE juliaFunctor(cTYPE p, cTYPE c) {
// CHECK: return hipCaddf(hipCmulf(p, p), c);
return hipCaddf(hipCmulf(p, p), c);
}
// Map pixel coordinates (x, y) of a DIMX x DIMY image to a point in the
// complex plane, scaled by zoom and shifted by (moveX, moveY).
// NOTE(review): jx carries a 1.5 factor that jy does not -- presumably an
// intentional aspect-ratio correction; confirm against the rendering code.
__device__ cTYPE convertToComplex(int x, int y, float zoom) {
TYPE jx = 1.5 * (x - DIMX / 2) / (0.5 * zoom * DIMX) + moveX;
TYPE jy = (y - DIMY / 2) / (0.5 * zoom * DIMY) + moveY;
return cMakecuComplex(jx, jy);
}
// Escape-time iteration: applies the Julia map until |p| exceeds 4 or
// MAXITERATIONS is reached; returns the number of iterations performed.
__device__ int evolveComplexPoint(cTYPE p, cTYPE c) {
int it = 1;
// CHECK: while (it <= MAXITERATIONS && hipCabsf(p) <= 4) {
while (it <= MAXITERATIONS && cuCabsf(p) <= 4) {
p = juliaFunctor(p, c);
it++;
}
return it;
}
// Kernel: one thread per pixel; writes each pixel's escape time into
// data, laid out with stride DIMY (data[i*DIMY + j] for pixel (i, j)).
__global__ void computeJulia(int* data, cTYPE c, float zoom) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Bounds guard: the launch grid may overshoot the image extents.
if (i<DIMX && j<DIMY) {
cTYPE p = convertToComplex(i, j, zoom);
data[i*DIMY + j] = evolveComplexPoint(p, c);
}
}
| 164e76b19820347557069fb0ffc6b6f7fd6a5267.cu | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
// CHECK: #include "hip/hip_complex.h"
#include "cuComplex.h"
#define TYPEFLOAT
#define DIMX 100
#define DIMY 40
#define moveX 2
#define moveY 1
#define MAXITERATIONS 10
#ifdef TYPEFLOAT
#define TYPE float
// CHECK: #define cTYPE hipFloatComplex
#define cTYPE cuFloatComplex
// CHECK: #define cMakecuComplex(re,i) make_hipFloatComplex(re,i)
#define cMakecuComplex(re,i) make_cuFloatComplex(re,i)
#endif
#ifdef TYPEDOUBLE
// CHECK: #define TYPE hipDoubleComplex
#define TYPE cuDoubleComplex
// CHECK: #define cMakecuComplex(re,i) make_hipDoubleComplex(re,i)
#define cMakecuComplex(re,i) make_cuDoubleComplex(re,i)
#endif
// One Julia-map iteration: z -> z*z + c, in single-precision complex.
// (hipify lit test: the CHECK line pins the expected converted call.)
__device__ cTYPE juliaFunctor(cTYPE p, cTYPE c) {
// CHECK: return hipCaddf(hipCmulf(p, p), c);
return cuCaddf(cuCmulf(p, p), c);
}
// Map pixel coordinates (x, y) of a DIMX x DIMY image to a point in the
// complex plane, scaled by zoom and shifted by (moveX, moveY).
// NOTE(review): jx carries a 1.5 factor that jy does not -- presumably an
// intentional aspect-ratio correction; confirm against the rendering code.
__device__ cTYPE convertToComplex(int x, int y, float zoom) {
TYPE jx = 1.5 * (x - DIMX / 2) / (0.5 * zoom * DIMX) + moveX;
TYPE jy = (y - DIMY / 2) / (0.5 * zoom * DIMY) + moveY;
return cMakecuComplex(jx, jy);
}
// Escape-time iteration: applies the Julia map until |p| exceeds 4 or
// MAXITERATIONS is reached; returns the number of iterations performed.
__device__ int evolveComplexPoint(cTYPE p, cTYPE c) {
int it = 1;
// CHECK: while (it <= MAXITERATIONS && hipCabsf(p) <= 4) {
while (it <= MAXITERATIONS && cuCabsf(p) <= 4) {
p = juliaFunctor(p, c);
it++;
}
return it;
}
// Kernel: one thread per pixel; writes each pixel's escape time into
// data, laid out with stride DIMY (data[i*DIMY + j] for pixel (i, j)).
__global__ void computeJulia(int* data, cTYPE c, float zoom) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Bounds guard: the launch grid may overshoot the image extents.
if (i<DIMX && j<DIMY) {
cTYPE p = convertToComplex(i, j, zoom);
data[i*DIMY + j] = evolveComplexPoint(p, c);
}
}
|
473ee2da2b1d1c05602f8f6b63c3b7bd15559360.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Based on the hello-world created by Ingemar Ragnemalm 2010
(http://computer-graphics.se/hello-world-for-cuda.html)
and the book "CUDA by Example"
This example code detects CUDA devices, print their information
and tests the parallel programing using CUDA
Author: João Ribeiro
nvcc check-cuda.cu -L /usr/local/cuda/lib -lcudart -o check-cuda
*/
#include <stdio.h>
#include <unistd.h>
const int N = 16;
const int blocksize = 16;
// Kernel: element-wise add of b into a; one thread per element of a
// single 1-D block (the host launches exactly N == blocksize threads).
__global__
void hello(char *a, int *b)
{
    const int lane = threadIdx.x;
    a[lane] += b[lane];
}
// Host driver: enumerates all visible devices and prints their properties,
// then runs the `hello` kernel once as a smoke test -- adding per-character
// offsets on the device turns "Hello " into "Hello World!".
// NOTE(review): no API return code is checked anywhere in this function;
// a failed malloc/copy/launch would only show up as wrong output.
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *ad;   // device copy of a
int *bd;    // device copy of b
int dev_count;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
hipDeviceProp_t prop;
hipGetDeviceCount(&dev_count);
printf("Number of CUDA devices found: %d\n\n", dev_count);
/* Get and print GPU information */
for (int i = 0; i < dev_count; i++) {
hipGetDeviceProperties(&prop, i);
printf( "--- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap:" );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout :" );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "--- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( "--- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
printf( "Max grid dimensions:(%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
printf( "\n" );
}
/* End of print GPU information */
printf("The next print will be the result of a parallel processed array. If you see the string \"Hello World!\" then CUDA is working!\n\n");
printf("%s", a);
/* Using CUDA to generate the string "World!"*/
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
// The blocking device-to-host copy below also synchronizes with the kernel.
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
hipFree( bd );
/* End of using CUDA to generate the string "World!"*/
printf("%s\n\n", a);
usleep(1000);
return EXIT_SUCCESS;
}
| 473ee2da2b1d1c05602f8f6b63c3b7bd15559360.cu | /*
Based on the hello-world created by Ingemar Ragnemalm 2010
(http://computer-graphics.se/hello-world-for-cuda.html)
and the book "CUDA by Example"
This example code detects CUDA devices, print their information
and tests the parallel programing using CUDA
Author: João Ribeiro
nvcc check-cuda.cu -L /usr/local/cuda/lib -lcudart -o check-cuda
*/
#include <stdio.h>
#include <unistd.h>
const int N = 16;
const int blocksize = 16;
// Kernel: element-wise add of b into a; one thread per element of a
// single 1-D block (the host launches exactly N == blocksize threads).
__global__
void hello(char *a, int *b)
{
    const int lane = threadIdx.x;
    a[lane] += b[lane];
}
// Host driver: enumerates all visible CUDA devices and prints their
// properties, then runs the `hello` kernel once as a smoke test -- adding
// per-character offsets on the device turns "Hello " into "Hello World!".
// NOTE(review): no API return code is checked anywhere in this function;
// a failed malloc/copy/launch would only show up as wrong output.
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *ad;   // device copy of a
int *bd;    // device copy of b
int dev_count;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
cudaDeviceProp prop;
cudaGetDeviceCount(&dev_count);
printf("Number of CUDA devices found: %d\n\n", dev_count);
/* Get and print GPU information */
for (int i = 0; i < dev_count; i++) {
cudaGetDeviceProperties(&prop, i);
printf( "--- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap:" );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout :" );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "--- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( "--- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
printf( "Max grid dimensions:(%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
printf( "\n" );
}
/* End of print GPU information */
printf("The next print will be the result of a parallel processed array. If you see the string \"Hello World!\" then CUDA is working!\n\n");
printf("%s", a);
/* Using CUDA to generate the string "World!"*/
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
// The blocking device-to-host copy below also synchronizes with the kernel.
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
/* End of using CUDA to generate the string "World!"*/
printf("%s\n\n", a);
usleep(1000);
return EXIT_SUCCESS;
}
|
5d92cdd19ee97895a13a17590297a71e973dd9c4.hip | // !!! This is a file automatically generated by hipify!!!
#include<utility>
#include<stdio.h>
#include<assert.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <ostream>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <utility>
// HEADER FILES
#include "helper.h"
#include "iterative-methods.h"
#include "iterative-1D-cpu.h"
#include "iterative-1D-gpu.h"
#include "iterative-1D-rectangular-gaussSeidel.h"
#include "iterative-1D-rectangular-multiple.h"
#define PI 3.14159265358979323
// Benchmark driver: solves a 1-D Poisson problem with Gauss-Seidel three
// ways (CPU, classic GPU, rectangular/swept GPU), first measuring how many
// iterations/cycles each needs to reach tolerance TOL, then timing a run of
// exactly that length, and finally reporting speedups and residuals.
// Usage: prog nGrids threadsPerBlock nInnerUpdates
// NOTE(review): argv[1..3] are read without checking argc, and atoi gives
// no error reporting -- malformed input silently becomes 0.
int main(int argc, char *argv[])
{
// INPUTS
const int nGrids = atoi(argv[1]);
const int threadsPerBlock = atoi(argv[2]);
const int nInnerUpdates = atoi(argv[3]);
// const int TOL = atoi(argv[4]);
const int TOL = 10;
int nGSCpuIterations;
int nGSGpuIterations;
int nGSCycles;
// INITIALIZE ARRAYS
float * initX = new float[nGrids];
float * rhs = new float[nGrids];
float * leftMatrix = new float[nGrids];
float * centerMatrix = new float[nGrids];
float * rightMatrix = new float[nGrids];
float dx = 1.0f / (nGrids - 1);
// 1D POISSON MATRIX
// Tridiagonal stencil (-1, 2, -1)/dx^2 with zero Dirichlet boundaries;
// interior points of the initial guess are seeded with their own index.
for (int iGrid = 0; iGrid < nGrids; ++iGrid) {
if (iGrid == 0 || iGrid == nGrids-1) {
initX[iGrid] = 0.0f;
}
else {
initX[iGrid] = (float)iGrid;
}
rhs[iGrid] = 1.0f;
leftMatrix[iGrid] = -1.0f / (dx * dx);
centerMatrix[iGrid] = 2.0f / (dx * dx);
rightMatrix[iGrid] = -1.0f / (dx * dx);
}
// OBTAIN NUMBER OF ITERATIONS NECESSARY TO ACHIEVE TOLERANCE FOR EACH METHOD
nGSCpuIterations = gaussSeidelCpuIterationCount(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, nGrids, TOL);
nGSGpuIterations = gaussSeidelGpuClassicIterationCount(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, TOL, threadsPerBlock);
nGSCycles = gaussSeidelGpuRectangularIterationCount(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, threadsPerBlock, TOL, nInnerUpdates);
// CPU - GAUSS-SEIDEL
clock_t cpuGSStartTime = clock();
float * solutionGSCpu = gaussSeidelCpu(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, nGrids, nGSCpuIterations);
clock_t cpuGSEndTime = clock();
double cpuGSTime = (cpuGSEndTime - cpuGSStartTime) / (float) CLOCKS_PER_SEC;
cpuGSTime = cpuGSTime * (1e3); // Convert to ms
// GPU - GAUSS-SEIDEL
// Device-side timing via events; elapsed time is reported in ms.
// NOTE(review): none of the four events created below is ever destroyed
// with hipEventDestroy (minor resource leak for a one-shot program).
hipEvent_t startGSGpu, stopGSGpu;
float gpuGSTime;
hipEventCreate( &startGSGpu );
hipEventCreate( &stopGSGpu );
hipEventRecord(startGSGpu, 0);
float * solutionGSGpu = gaussSeidelGpuClassic(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, nGSGpuIterations, threadsPerBlock);
hipEventRecord(stopGSGpu, 0);
hipEventSynchronize(stopGSGpu);
hipEventElapsedTime(&gpuGSTime, startGSGpu, stopGSGpu);
// RECTANGULAR METHOD - GAUSS-SEIDEL
hipEvent_t startGSGpuRectangular, stopGSGpuRectangular;
float gpuGSRectangularTime;
hipEventCreate( &startGSGpuRectangular );
hipEventCreate( &stopGSGpuRectangular );
hipEventRecord( startGSGpuRectangular, 0);
float * solutionGSGpuRectangular = gaussSeidelGpuRectangular(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, threadsPerBlock, nGSCycles, nInnerUpdates);
hipEventRecord(stopGSGpuRectangular, 0);
hipEventSynchronize(stopGSGpuRectangular);
hipEventElapsedTime(&gpuGSRectangularTime, startGSGpuRectangular, stopGSGpuRectangular);
// COMPUTE TIME FACTORS
float cpuToGpu = cpuGSTime / gpuGSTime;
float gpuToRectangular = gpuGSTime / gpuGSRectangularTime;
float cpuToRectangular = cpuGSTime / gpuGSRectangularTime;
// PRINT SOLUTION
for (int i = 0; i < nGrids; i++) {
printf("Grid %d = %f %f %f\n", i, solutionGSCpu[i], solutionGSGpu[i], solutionGSGpuRectangular[i]);
}
// PRINTOUT
// Print parameters of the problem to screen
printf("===============INFORMATION============================\n");
printf("Number of grid points: %d\n", nGrids);
printf("Threads Per Block: %d\n", threadsPerBlock);
printf("Number of Cycles of Gauss-Seidel Rectangular performed: %d\n", nGSCycles);
printf("CPU -> GPU Speedup Factor is %f\n", cpuToGpu);
printf("GPU -> GPU Rectangular Speedup Factor is %f\n", gpuToRectangular);
printf("CPU -> GPU Rectangular Speedup Factor is %f\n", cpuToRectangular);
printf("======================================================\n");
// Print out number of iterations needed for each method
printf("Number of Iterations needed for GS CPU: %d \n", nGSCpuIterations);
printf("Number of Iterations needed for GS GPU: %d \n", nGSGpuIterations);
printf("Number of Cycles needed for GS GPU Rectangular: %d (with %d inner updates) \n", nGSCycles, nInnerUpdates);
printf("======================================================\n");
// Print out time for cpu, classic gpu, and swept gpu approaches
printf("Time needed for the GS CPU: %f ms\n", cpuGSTime);
printf("Time needed for the GS GPU: %f ms\n", gpuGSTime);
printf("Time needed for the GS GPU Rectangular method: %f ms\n", gpuGSRectangularTime);
printf("======================================================\n");
// Compute the residual of the resulting solution (|b-Ax|)
float residualGSCpu = Residual(solutionGSCpu, rhs, leftMatrix, centerMatrix, rightMatrix, nGrids);
float residualGSGpu = Residual(solutionGSGpu, rhs, leftMatrix, centerMatrix, rightMatrix, nGrids);
float residualGSGpuRectangular = Residual(solutionGSGpuRectangular, rhs, leftMatrix, centerMatrix, rightMatrix, nGrids);
printf("Residual of the GS CPU solution is %f\n", residualGSCpu);
printf("Residual of the GS GPU solution is %f\n", residualGSGpu);
printf("Residual of the GS Rectangular solution is %f\n", residualGSGpuRectangular);
/* for (int i = 0; i < nGrids; i++) {
if (i == 0 || i == nGrids-1) {
assert(solutionGpuRectangular[i] == 0.0);
}
else {
assert(solutionGpuRectangular[i] == (float)(cycles * nIterations + 1.0));
}
}
*/
/* // Print out time for cpu, classic gpu, and swept gpu approaches
float cpuTimePerIteration = (cpuTime / nIters) * 1e3;
float classicTimePerIteration = gpuTime / nIters;
float sweptTimePerIteration = timeSwept / nIters;
float timeMultiplier = classicTimePerIteration / sweptTimePerIteration;
printf("Time needed for the CPU (per iteration): %f ms\n", cpuTimePerIteration);
printf("Time needed for the Classic GPU (per iteration) is %f ms\n", classicTimePerIteration);
printf("Time needed for the Swept GPU (per iteration): %f ms\n", sweptTimePerIteration);
*/
// Write results to file (appended, one line per run)
std::ofstream results;
results.open("results-gs.txt", std::ios_base::app);
results << nGrids << " " << threadsPerBlock << " " << TOL << " " << cpuGSTime << " " << gpuGSTime << " " << gpuGSRectangularTime << "\n";
results.close();
// FREE MEMORY
delete[] initX;
delete[] rhs;
delete[] leftMatrix;
delete[] centerMatrix;
delete[] rightMatrix;
delete[] solutionGSCpu;
delete[] solutionGSGpu;
delete[] solutionGSGpuRectangular;
return 0;
}
| 5d92cdd19ee97895a13a17590297a71e973dd9c4.cu | #include<utility>
#include<stdio.h>
#include<assert.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <ostream>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <utility>
// HEADER FILES
#include "helper.h"
#include "iterative-methods.h"
#include "iterative-1D-cpu.h"
#include "iterative-1D-gpu.h"
#include "iterative-1D-rectangular-gaussSeidel.h"
#include "iterative-1D-rectangular-multiple.h"
#define PI 3.14159265358979323
// Benchmark driver: solves a 1-D Poisson problem with Gauss-Seidel three
// ways (CPU, classic GPU, rectangular/swept GPU), first measuring how many
// iterations/cycles each needs to reach tolerance TOL, then timing a run of
// exactly that length, and finally reporting speedups and residuals.
// Usage: prog nGrids threadsPerBlock nInnerUpdates
// NOTE(review): argv[1..3] are read without checking argc, and atoi gives
// no error reporting -- malformed input silently becomes 0.
int main(int argc, char *argv[])
{
// INPUTS
const int nGrids = atoi(argv[1]);
const int threadsPerBlock = atoi(argv[2]);
const int nInnerUpdates = atoi(argv[3]);
// const int TOL = atoi(argv[4]);
const int TOL = 10;
int nGSCpuIterations;
int nGSGpuIterations;
int nGSCycles;
// INITIALIZE ARRAYS
float * initX = new float[nGrids];
float * rhs = new float[nGrids];
float * leftMatrix = new float[nGrids];
float * centerMatrix = new float[nGrids];
float * rightMatrix = new float[nGrids];
float dx = 1.0f / (nGrids - 1);
// 1D POISSON MATRIX
// Tridiagonal stencil (-1, 2, -1)/dx^2 with zero Dirichlet boundaries;
// interior points of the initial guess are seeded with their own index.
for (int iGrid = 0; iGrid < nGrids; ++iGrid) {
if (iGrid == 0 || iGrid == nGrids-1) {
initX[iGrid] = 0.0f;
}
else {
initX[iGrid] = (float)iGrid;
}
rhs[iGrid] = 1.0f;
leftMatrix[iGrid] = -1.0f / (dx * dx);
centerMatrix[iGrid] = 2.0f / (dx * dx);
rightMatrix[iGrid] = -1.0f / (dx * dx);
}
// OBTAIN NUMBER OF ITERATIONS NECESSARY TO ACHIEVE TOLERANCE FOR EACH METHOD
nGSCpuIterations = gaussSeidelCpuIterationCount(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, nGrids, TOL);
nGSGpuIterations = gaussSeidelGpuClassicIterationCount(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, TOL, threadsPerBlock);
nGSCycles = gaussSeidelGpuRectangularIterationCount(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, threadsPerBlock, TOL, nInnerUpdates);
// CPU - GAUSS-SEIDEL
clock_t cpuGSStartTime = clock();
float * solutionGSCpu = gaussSeidelCpu(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, nGrids, nGSCpuIterations);
clock_t cpuGSEndTime = clock();
double cpuGSTime = (cpuGSEndTime - cpuGSStartTime) / (float) CLOCKS_PER_SEC;
cpuGSTime = cpuGSTime * (1e3); // Convert to ms
// GPU - GAUSS-SEIDEL
// Device-side timing via events; elapsed time is reported in ms.
// NOTE(review): none of the four events created below is ever destroyed
// with cudaEventDestroy (minor resource leak for a one-shot program).
cudaEvent_t startGSGpu, stopGSGpu;
float gpuGSTime;
cudaEventCreate( &startGSGpu );
cudaEventCreate( &stopGSGpu );
cudaEventRecord(startGSGpu, 0);
float * solutionGSGpu = gaussSeidelGpuClassic(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, nGSGpuIterations, threadsPerBlock);
cudaEventRecord(stopGSGpu, 0);
cudaEventSynchronize(stopGSGpu);
cudaEventElapsedTime(&gpuGSTime, startGSGpu, stopGSGpu);
// RECTANGULAR METHOD - GAUSS-SEIDEL
cudaEvent_t startGSGpuRectangular, stopGSGpuRectangular;
float gpuGSRectangularTime;
cudaEventCreate( &startGSGpuRectangular );
cudaEventCreate( &stopGSGpuRectangular );
cudaEventRecord( startGSGpuRectangular, 0);
float * solutionGSGpuRectangular = gaussSeidelGpuRectangular(initX, rhs, leftMatrix,
centerMatrix, rightMatrix, nGrids, threadsPerBlock, nGSCycles, nInnerUpdates);
cudaEventRecord(stopGSGpuRectangular, 0);
cudaEventSynchronize(stopGSGpuRectangular);
cudaEventElapsedTime(&gpuGSRectangularTime, startGSGpuRectangular, stopGSGpuRectangular);
// COMPUTE TIME FACTORS
float cpuToGpu = cpuGSTime / gpuGSTime;
float gpuToRectangular = gpuGSTime / gpuGSRectangularTime;
float cpuToRectangular = cpuGSTime / gpuGSRectangularTime;
// PRINT SOLUTION
for (int i = 0; i < nGrids; i++) {
printf("Grid %d = %f %f %f\n", i, solutionGSCpu[i], solutionGSGpu[i], solutionGSGpuRectangular[i]);
}
// PRINTOUT
// Print parameters of the problem to screen
printf("===============INFORMATION============================\n");
printf("Number of grid points: %d\n", nGrids);
printf("Threads Per Block: %d\n", threadsPerBlock);
printf("Number of Cycles of Gauss-Seidel Rectangular performed: %d\n", nGSCycles);
printf("CPU -> GPU Speedup Factor is %f\n", cpuToGpu);
printf("GPU -> GPU Rectangular Speedup Factor is %f\n", gpuToRectangular);
printf("CPU -> GPU Rectangular Speedup Factor is %f\n", cpuToRectangular);
printf("======================================================\n");
// Print out number of iterations needed for each method
printf("Number of Iterations needed for GS CPU: %d \n", nGSCpuIterations);
printf("Number of Iterations needed for GS GPU: %d \n", nGSGpuIterations);
printf("Number of Cycles needed for GS GPU Rectangular: %d (with %d inner updates) \n", nGSCycles, nInnerUpdates);
printf("======================================================\n");
// Print out time for cpu, classic gpu, and swept gpu approaches
printf("Time needed for the GS CPU: %f ms\n", cpuGSTime);
printf("Time needed for the GS GPU: %f ms\n", gpuGSTime);
printf("Time needed for the GS GPU Rectangular method: %f ms\n", gpuGSRectangularTime);
printf("======================================================\n");
// Compute the residual of the resulting solution (|b-Ax|)
float residualGSCpu = Residual(solutionGSCpu, rhs, leftMatrix, centerMatrix, rightMatrix, nGrids);
float residualGSGpu = Residual(solutionGSGpu, rhs, leftMatrix, centerMatrix, rightMatrix, nGrids);
float residualGSGpuRectangular = Residual(solutionGSGpuRectangular, rhs, leftMatrix, centerMatrix, rightMatrix, nGrids);
printf("Residual of the GS CPU solution is %f\n", residualGSCpu);
printf("Residual of the GS GPU solution is %f\n", residualGSGpu);
printf("Residual of the GS Rectangular solution is %f\n", residualGSGpuRectangular);
/* for (int i = 0; i < nGrids; i++) {
if (i == 0 || i == nGrids-1) {
assert(solutionGpuRectangular[i] == 0.0);
}
else {
assert(solutionGpuRectangular[i] == (float)(cycles * nIterations + 1.0));
}
}
*/
/* // Print out time for cpu, classic gpu, and swept gpu approaches
float cpuTimePerIteration = (cpuTime / nIters) * 1e3;
float classicTimePerIteration = gpuTime / nIters;
float sweptTimePerIteration = timeSwept / nIters;
float timeMultiplier = classicTimePerIteration / sweptTimePerIteration;
printf("Time needed for the CPU (per iteration): %f ms\n", cpuTimePerIteration);
printf("Time needed for the Classic GPU (per iteration) is %f ms\n", classicTimePerIteration);
printf("Time needed for the Swept GPU (per iteration): %f ms\n", sweptTimePerIteration);
*/
// Write results to file (appended, one line per run)
std::ofstream results;
results.open("results-gs.txt", std::ios_base::app);
results << nGrids << " " << threadsPerBlock << " " << TOL << " " << cpuGSTime << " " << gpuGSTime << " " << gpuGSRectangularTime << "\n";
results.close();
// FREE MEMORY
delete[] initX;
delete[] rhs;
delete[] leftMatrix;
delete[] centerMatrix;
delete[] rightMatrix;
delete[] solutionGSCpu;
delete[] solutionGSGpu;
delete[] solutionGSGpuRectangular;
return 0;
}
|
bb5a96caac0a365ab2cf5c04d28a41766a0a9160.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
// Gather: out[bi][ci][j] = points[bi][ci][ idx[bi][j] ].
// blockIdx.x grid-strides over batches, blockIdx.y over channels, and
// threadIdx.x over the m gathered points.
__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
  for (int bi = blockIdx.x; bi < b; bi += gridDim.x) {
    for (int ci = blockIdx.y; ci < c; ci += gridDim.y) {
      const float *src = points + (bi * c + ci) * n;  // this (batch, channel) row
      float *dst = out + (bi * c + ci) * m;
      const int *sel = idx + bi * m;                  // indices shared across channels
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        dst[j] = src[sel[j]];
      }
    }
  }
}
// Host wrapper: launches gather_points_kernel on the current torch stream
// with a (b, c) grid; block size is derived from npoints via opt_n_threads.
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out) {
hipLaunchKernelGGL(( gather_points_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), b, c, n, npoints,
points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
// Scatter-add backward pass of the gather:
//   grad_points[bi][ci][ idx[bi][j] ] += grad_out[bi][ci][j]
// atomicAdd is required because several j may select the same source index.
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
  for (int bi = blockIdx.x; bi < b; bi += gridDim.x) {
    for (int ci = blockIdx.y; ci < c; ci += gridDim.y) {
      const float *g = grad_out + (bi * c + ci) * m;   // incoming gradients
      float *acc = grad_points + (bi * c + ci) * n;    // accumulation target
      const int *sel = idx + bi * m;
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        atomicAdd(acc + sel[j], g[j]);
      }
    }
  }
}
// Host wrapper: launches gather_points_grad_kernel on the current torch
// stream with a (b, c) grid; block size derived from npoints.
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points) {
hipLaunchKernelGGL(( gather_points_grad_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
b, c, n, npoints, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
// Reduction step: fold shared-memory slot idx2 into slot idx1, keeping the
// larger distance and its point index (ties keep the idx1 entry).
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
  const float left = dists[idx1];
  const float right = dists[idx2];
  const int left_i = dists_i[idx1];
  const int right_i = dists_i[idx2];
  const bool take_right = right > left;
  dists[idx1] = take_right ? right : left;
  dists_i[idx1] = take_right ? right_i : left_i;
}
// Input dataset: (b, n, 3), tmp: (b, n)
// Output idxs (b, m)
// Iterative farthest-point sampling, one block per batch. Each outer
// iteration j: every thread scans its stride of the n points, maintaining
// in temp[k] the squared distance from point k to the nearest already
// selected point, then the block tree-reduces in shared memory to find the
// point farthest from the selected set; that index becomes idxs[j].
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
// Offset all pointers to this batch's slice (xyz layout: 3 floats/point).
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
// Point 0 is always the first selection.
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// NOTE(review): points with squared norm <= 1e-3 are skipped entirely --
// presumably padding points at/near the origin; confirm caller convention.
if (mag <= 1e-3) continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
// temp[k] keeps the running min distance to the selected set.
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
// Pairwise tree reduction over shared memory; the block_size comparisons
// are compile-time constants, so unused rungs are dead-code eliminated.
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
// Winner is in slot 0; all threads pick it up as the next anchor point.
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
// Host wrapper: picks a block size from n and dispatches to the matching
// compile-time instantiation of furthest_point_sampling_kernel (the
// template parameter must equal the launch's thread count for the
// shared-memory reduction to be correct). One block per batch.
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs) {
unsigned int n_threads = opt_n_threads(n);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
switch (n_threads) {
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
default:
// Fallback mirrors the 512 case for any unexpected opt_n_threads result.
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
}
CUDA_CHECK_ERRORS();
}
| bb5a96caac0a365ab2cf5c04d28a41766a0a9160.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
// Gather: out[bi][ci][j] = points[bi][ci][ idx[bi][j] ].
// blockIdx.x grid-strides over batches, blockIdx.y over channels, and
// threadIdx.x over the m gathered points.
__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
  for (int bi = blockIdx.x; bi < b; bi += gridDim.x) {
    for (int ci = blockIdx.y; ci < c; ci += gridDim.y) {
      const float *src = points + (bi * c + ci) * n;  // this (batch, channel) row
      float *dst = out + (bi * c + ci) * m;
      const int *sel = idx + bi * m;                  // indices shared across channels
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        dst[j] = src[sel[j]];
      }
    }
  }
}
// Host wrapper: launches gather_points_kernel on the current torch stream
// with a (b, c) grid; block size is derived from npoints via opt_n_threads.
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out) {
gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
// Scatter-add backward pass of the gather:
//   grad_points[bi][ci][ idx[bi][j] ] += grad_out[bi][ci][j]
// atomicAdd is required because several j may select the same source index.
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
  for (int bi = blockIdx.x; bi < b; bi += gridDim.x) {
    for (int ci = blockIdx.y; ci < c; ci += gridDim.y) {
      const float *g = grad_out + (bi * c + ci) * m;   // incoming gradients
      float *acc = grad_points + (bi * c + ci) * n;    // accumulation target
      const int *sel = idx + bi * m;
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        atomicAdd(acc + sel[j], g[j]);
      }
    }
  }
}
// Host wrapper: launches gather_points_grad_kernel on the current torch
// stream with a (b, c) grid; block size derived from npoints.
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points) {
gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
at::cuda::getCurrentCUDAStream()>>>(
b, c, n, npoints, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
// Reduction step: fold shared-memory slot idx2 into slot idx1, keeping the
// larger distance and its point index (ties keep the idx1 entry).
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
  const float left = dists[idx1];
  const float right = dists[idx2];
  const int left_i = dists_i[idx1];
  const int right_i = dists_i[idx2];
  const bool take_right = right > left;
  dists[idx1] = take_right ? right : left;
  dists_i[idx1] = take_right ? right_i : left_i;
}
// Input dataset: (b, n, 3), tmp: (b, n)
// Output idxs (b, m)
// Iterative farthest-point sampling, one block per batch. Each outer
// iteration j: every thread scans its stride of the n points, maintaining
// in temp[k] the squared distance from point k to the nearest already
// selected point, then the block tree-reduces in shared memory to find the
// point farthest from the selected set; that index becomes idxs[j].
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
// Offset all pointers to this batch's slice (xyz layout: 3 floats/point).
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
// Point 0 is always the first selection.
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// NOTE(review): points with squared norm <= 1e-3 are skipped entirely --
// presumably padding points at/near the origin; confirm caller convention.
if (mag <= 1e-3) continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
// temp[k] keeps the running min distance to the selected set.
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
// Pairwise tree reduction over shared memory; the block_size comparisons
// are compile-time constants, so unused rungs are dead-code eliminated.
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
// Winner is in slot 0; all threads pick it up as the next anchor point.
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
// Host-side dispatcher for the templated furthest-point-sampling kernel.
// Picks a power-of-two block size for n via opt_n_threads, launches one
// block per batch element on the current torch CUDA stream, and checks
// for launch errors afterwards.
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
                                            const float *dataset, float *temp,
                                            int *idxs) {
  const unsigned int n_threads = opt_n_threads(n);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

// One case per supported block size; the template parameter must be a
// compile-time constant, hence the explicit instantiation per size.
#define FPS_LAUNCH_CASE(BS)                                              \
  case BS:                                                               \
    furthest_point_sampling_kernel<BS>                                   \
        <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);     \
    break;

  switch (n_threads) {
    FPS_LAUNCH_CASE(512)
    FPS_LAUNCH_CASE(256)
    FPS_LAUNCH_CASE(128)
    FPS_LAUNCH_CASE(64)
    FPS_LAUNCH_CASE(32)
    FPS_LAUNCH_CASE(16)
    FPS_LAUNCH_CASE(8)
    FPS_LAUNCH_CASE(4)
    FPS_LAUNCH_CASE(2)
    FPS_LAUNCH_CASE(1)
    default:
      // Fallback: assume the largest supported block size.
      furthest_point_sampling_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
  }
#undef FPS_LAUNCH_CASE

  CUDA_CHECK_ERRORS();
}
|
9af4e29c23f1c0cfd57ca982da710f0f85eca5ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#define SIZE 1024*1024*1000
#define CUDA_CHECK_RETURN(value) {\
hipError_t _m_cudaStat = value;\
if (_m_cudaStat != hipSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", hipGetErrorString(_m_cudaStat),__LINE__, __FILE__);\
exit(1);\
}\
}
__host__ int main()
{
    // Report basic properties of device 0, time a host-side fill of a large
    // array with HIP events, and perform one host->device copy.
    // Fixes vs. original: 'dev' was an uninitialized char passed to
    // hipSetDevice; 'elapsedTime' was printed uninitialized (UB) because
    // hipEventElapsedTime was never called; API results were unchecked even
    // though CUDA_CHECK_RETURN exists; the unused 'syncEvent' is removed and
    // the events are now destroyed.
    int dev = 0; // explicit device index (was an uninitialized char)
    CUDA_CHECK_RETURN(hipSetDevice(dev));
    hipDeviceProp_t deviceProp;
    CUDA_CHECK_RETURN(hipGetDeviceProperties(&deviceProp, dev));
    printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
    printf(" Warp size: %d\n", deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
    float elapsedTime = 0.0f;
    hipEvent_t start, stop;
    CUDA_CHECK_RETURN(hipEventCreate(&start));
    CUDA_CHECK_RETURN(hipEventCreate(&stop));
    float* vec1 = new float[SIZE];
    CUDA_CHECK_RETURN(hipEventRecord(start, 0));
    for (int i = 0; i < SIZE; i++)
    {
        vec1[i] = i;
        // printf("#%d\t%f\t %f\n", i, vec1[i]);
    }
    CUDA_CHECK_RETURN(hipEventRecord(stop, 0));
    // NOTE(review): events time the GPU stream, not host work; the fill loop
    // above runs on the host, so this interval mostly measures event overhead.
    CUDA_CHECK_RETURN(hipEventSynchronize(stop));
    CUDA_CHECK_RETURN(hipEventElapsedTime(&elapsedTime, start, stop));
    printf("%g", elapsedTime);
    float* devVec1;
    CUDA_CHECK_RETURN(hipMalloc((void**)&devVec1, sizeof(float) * SIZE));
    CUDA_CHECK_RETURN(hipMemcpy(devVec1, vec1, sizeof(float) * SIZE, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipFree(devVec1));
    CUDA_CHECK_RETURN(hipEventDestroy(start));
    CUDA_CHECK_RETURN(hipEventDestroy(stop));
    delete[] vec1; vec1 = 0;
    return 0;
}
| 9af4e29c23f1c0cfd57ca982da710f0f85eca5ca.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#define SIZE 1024*1024*1000
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat),__LINE__, __FILE__);\
exit(1);\
}\
}
__host__ int main()
{
    // Report basic properties of device 0, time a host-side fill of a large
    // array with CUDA events, and perform one host->device copy.
    // Fixes vs. original: 'dev' was an uninitialized char passed to
    // cudaSetDevice; 'elapsedTime' was printed uninitialized (UB) because
    // cudaEventElapsedTime was never called; API results were unchecked even
    // though CUDA_CHECK_RETURN exists; the unused 'syncEvent' is removed and
    // the events are now destroyed.
    int dev = 0; // explicit device index (was an uninitialized char)
    CUDA_CHECK_RETURN(cudaSetDevice(dev));
    cudaDeviceProp deviceProp;
    CUDA_CHECK_RETURN(cudaGetDeviceProperties(&deviceProp, dev));
    printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
    printf(" Warp size: %d\n", deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
    float elapsedTime = 0.0f;
    cudaEvent_t start, stop;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    float* vec1 = new float[SIZE];
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    for (int i = 0; i < SIZE; i++)
    {
        vec1[i] = i;
        // printf("#%d\t%f\t %f\n", i, vec1[i]);
    }
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    // NOTE(review): events time the GPU stream, not host work; the fill loop
    // above runs on the host, so this interval mostly measures event overhead.
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("%g", elapsedTime);
    float* devVec1;
    CUDA_CHECK_RETURN(cudaMalloc((void**)&devVec1, sizeof(float) * SIZE));
    CUDA_CHECK_RETURN(cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaFree(devVec1));
    CUDA_CHECK_RETURN(cudaEventDestroy(start));
    CUDA_CHECK_RETURN(cudaEventDestroy(stop));
    delete[] vec1; vec1 = 0;
    return 0;
}
|
c837617d5ee525d24560bea24b8402aacbb3f996.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef MODEL
# define MODEL
# include "tools.cpp"
# include "cuda_functions.cu"
# include <math.h>
namespace model{
// Model 1: ring of the first nqubits-1 qubits with per-bond couplings js,
// plus an extra "last" qubit coupled to qubit 0 with strength j; then a
// single-qubit magnetic kick (Uk_kernel) on every qubit using the rows of b.
// dev_R/dev_I: real/imaginary parts of the 2^nqubits state vector on device.
// jp, extra, conA, conB are accepted for a uniform signature but unused here.
void model1(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int extra,itpp::ivec conA, itpp::ivec conB){
/* MODEL 1
*
* *
* * -- * last qubit
* *
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// nearest-neighbour couplings around the closed sub-ring of nqubits-1 qubits
for(int i=0;i<nqubits-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(nqubits-1),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// couple qubit 0 with the last qubit
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 1.1: same as model 1 (ring of nqubits-1 qubits + last qubit on
// qubit 0) with one additional cross-link inside the chain between qubits
// nqubits-2 and 2, using coupling js(nqubits/2).
void model11(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int extra,itpp::ivec conA, itpp::ivec conB){
/* MODEL 1.1
*
* | *
* | * -- * last qubit
* | *
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// cross interaction within the chain
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-2,2,dev_R,dev_I,cos(js(nqubits/2)),sin(js(nqubits/2)),l);
for(int i=0;i<nqubits-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(nqubits-1),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// couple qubit 0 with the last qubit
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 2: ring of nqubits-1 qubits; the last qubit couples to qubits 0, 1
// and nqubits-2 (all with strength j); then kicks on every qubit.
void model2(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int extra,itpp::ivec conA, itpp::ivec conB){
/* MODEL 2
*
* * -
* * - * last qubit
* * -
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
for(int i=0;i<nqubits-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(nqubits-1),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// couple the last qubit with qubits 0, 1 and the next-to-last qubit
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,1,dev_R,dev_I,cos(j),sin(j),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,nqubits-2,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Closed homogeneous chain: couple every qubit to its successor (periodic)
// with uniform strength j, then apply the same kick field b to all qubits.
void chain(double *dev_R, double *dev_I, double j, itpp::vec b , int nqubits){
/* MODEL CHAIN CLOSED
*
* *
* *
* *
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
set_parameters(b,kcos,ksin,bx,by,bz);
for(int i=0;i<nqubits;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(nqubits),dev_R,dev_I,cos(j),sin(j),l);
}
for(int i=0;i<nqubits;i++) {
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Open homogeneous chain: couple qubit i to i+1 (no wrap-around) with
// uniform strength j, then apply the same kick field b to all qubits.
void chain_open(double *dev_R, double *dev_I, double j, itpp::vec b, int nqubits){
/* MODEL CHAIN OPEN
* * * * * * * * *
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
set_parameters(b,kcos,ksin,bx,by,bz);
for(int i=0;i<nqubits-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+1,dev_R,dev_I,cos(j),sin(j),l);
}
for(int i=0;i<nqubits;i++) {
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// 2D lattice of width xlen with periodic wrap: each site couples to its
// right neighbour (same row) and to the site xlen below (mod nqubits),
// both with strength j; then the same kick b on every qubit.
// NOTE(review): the bond loop stops at nqubits-2, so site nqubits-1 never
// gets its own horizontal/vertical bonds — confirm whether that is intended.
void lattice(double *dev_R, double *dev_I, double j, itpp::vec b , int nqubits, int xlen){
/* MODEL LATTICE
* * * *
* * * *
* * * *
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
int i_hor,i_ver;
choosenumblocks(l,numthreads,numblocks);
set_parameters(b,kcos,ksin,bx,by,bz);
for(int i=0;i<nqubits-1;i++) {
i_hor=(i+1)%xlen+(i/xlen)*xlen;
i_ver=(i+xlen)%nqubits;
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,cos(j),sin(j),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,cos(j),sin(j),l);
}
for(int i=0;i<nqubits;i++) {
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 3 (closed): two closed rings — chain A (qubits 0..xlen-1) and
// chain B (qubits xlen..nqubits-2) — joined by one A-B bond of strength jp;
// the last qubit couples to qubit 0 with strength j; kicks on every qubit.
// NOTE(review): chain B reuses js(i) starting from index 0, whereas
// model3_open uses js(i+xlen) — confirm which indexing is intended.
void model3(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 3 CLOSED
<- * * * * * * * ->
/
<- * * * * ->
\
* last qubit
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-1-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// A-B interaction
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, xlen-1,xlen,dev_R,dev_I,cos(jp),sin(jp),l);
// couple the last qubit to qubit 0 of chain A
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 3 (open): open chains A (0..xlen-1) and B (xlen..nqubits-3) joined
// by one hard-coded A-B bond (2,10); the last qubit couples to qubit 2 with
// strength j and to qubit 15 with a fixed weak coupling 0.01; the last
// qubit receives no kick (loop stops at nqubits-1).
// NOTE(review): the indices 2, 10 and 15 are hard-coded and only make sense
// for a specific nqubits/xlen (comment elsewhere suggests A=6, B=10) — verify.
void model3_open(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 3 OPEN
* * * * * * *
/
* * * *
\
* last qubit - not kicked
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
// itpp::vec bC(3);
// double kcosC,ksinC,bxC,byC,bzC;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-2-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i+xlen)),sin(js(i+xlen)),l);
}
// A-B interaction
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 2,10,dev_R,dev_I,cos(jp),sin(jp),l);
// couple the last qubit to chain A (qubit 2)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,2,dev_R,dev_I,cos(j),sin(j),l);
// MU interaction (fixed weak coupling to qubit 15)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,15,dev_R,dev_I,cos(.01),sin(.01),l);
// magnetic-kick evolution on every qubit except the last
for(int i=0;i<nqubits-1;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
// bC(0)=(itpp::pi)/(2*std::sqrt(2)); bC(1)=0.; bC(2)=(itpp::pi)/(2*std::sqrt(2));
// set_parameters(bC,kcosC,ksinC,bxC,byC,bzC);
// hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,dev_R,dev_I,bxC,byC,bzC,kcosC,ksinC,l);
return;
}
// Model 4 (closed): rings A and B joined by three jp-bonds at positions
// (xlen-4,xlen), (xlen-2,xlen+2), (xlen-1,xlen+4); last qubit couples to
// qubit 0; kicks on every qubit.  Chain B reuses js(i) (see model3 note).
void model4(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 4 CLOSED
<- * * * * * * * ->
\ / / /
<- * * * * ->
\
* last qubit
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-1-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// A-B interactions
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, xlen-4,xlen,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, xlen-2,xlen+2,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, xlen-1,xlen+4,dev_R,dev_I,cos(jp),sin(jp),l);
// couple the last qubit to qubit 0 of chain A
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 4 (open): open chains A and B with four jp-bonds whose positions
// are computed from xlen/nqubits; the last qubit couples to qubit xlen/2
// and receives no kick (loop stops at nqubits-1).
void model4_open(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 4 OPEN
* * * * * * *
\ / / /
* * * *
\
* last qubit - not kicked
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-2-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// A-B interactions
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 0,xlen,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, xlen/2-1,nqubits-xlen,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, xlen/2+1,nqubits-xlen+1,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, xlen-1,nqubits-2,dev_R,dev_I,cos(jp),sin(jp),l);
// couple the last qubit to chain A (qubit xlen/2)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,xlen/2,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit except the last
for(int i=0;i<nqubits-1;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 5 (closed): rings A and B joined one-to-one (qubit i to i+xlen for
// i < xlen-1, so the last A qubit gets no cross bond); last qubit couples
// to qubit 0; kicks on every qubit.  Chain B reuses js(i) (see model3 note).
void model5(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 5 CLOSED
<- * * * * * * ->
\ \ \ \ \ \
<- * * * * * * ->
\
* last qubit
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-1-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// one-to-one A-B interaction
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+xlen,dev_R,dev_I,cos(jp),sin(jp),l);
}
// couple the last qubit to qubit 0 of chain A
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 5 (open): open chains A and B joined one-to-one; last qubit couples
// to qubit 0.
// NOTE(review): unlike the other *_open models, the kick loop here runs over
// ALL nqubits (the last qubit IS kicked) — confirm whether that is intended.
void model5_open(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 5 OPEN
* * * * * *
\ \ \ \ \ \
* * * * * *
\
* last qubit
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-2-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// one-to-one A-B interaction
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+xlen,dev_R,dev_I,cos(jp),sin(jp),l);
}
// couple the last qubit to qubit 0 of chain A
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 6 (closed).
// NOTE(review): despite the three-chain diagram below, the code body is
// identical to model5 (two rings joined one-to-one, last qubit on qubit 0,
// kicks on every qubit) — confirm whether model 6 was ever implemented.
void model6(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 6 CLOSED
<-* * * * * * ->
\ / \ /
<- * * * * * * ->
\ \ / \
<- * * * * * * ->
\
* last qubit
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-1-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// one-to-one A-B interaction
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+xlen,dev_R,dev_I,cos(jp),sin(jp),l);
}
// couple the last qubit to qubit 0 of chain A
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 7 (special case, per original comment: for A=6, B=10): open chains
// A and B with hard-coded jp-bonds 0-6, 1-7, 2-9, 3-10, 4-13, 5-15 and one
// extra active bond 5-14 (others are commented out); the last qubit couples
// to qubit 2 and receives no kick.
void model7(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 7 SPECIAL CASE
* * * * * * *
\ / / /
* * * *
\
* last qubit - not kicked
FOR A=6 B=10
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-2-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// A-B interaction (hard-coded pairs)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 0,6,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 1,7,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 2,9,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 3,10,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 4,13,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 5,15,dev_R,dev_I,cos(jp),sin(jp),l);
// EXTRA A-B CONNECTIONS (all but 5-14 disabled)
//Ui_kernel<<<numblocks,numthreads>>>(0,7,dev_R,dev_I,cos(jp),sin(jp),l);
//Ui_kernel<<<numblocks,numthreads>>>(1,8,dev_R,dev_I,cos(jp),sin(jp),l);
//Ui_kernel<<<numblocks,numthreads>>>(2,10,dev_R,dev_I,cos(jp),sin(jp),l);
//Ui_kernel<<<numblocks,numthreads>>>(3,11,dev_R,dev_I,cos(jp),sin(jp),l);
//Ui_kernel<<<numblocks,numthreads>>>(4,12,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 5,14,dev_R,dev_I,cos(jp),sin(jp),l);
// couple the last qubit to chain A (qubit 2)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,2,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit except the last
for(int i=0;i<nqubits-1;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 8 (special case, per original comment: for A=10, B=15): open chains
// A and B with ten hard-coded jp-bonds (0-10 ... 9-25); extra bonds are kept
// commented out; the last qubit couples to qubit 6 and receives no kick.
void model8(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* MODEL 8 SPECIAL CASE
* * * * * * *
\ / / /
* * * *
\
* last qubit - not kicked
FOR A=10 B=15
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-2-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// A-B interaction (hard-coded pairs)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 0,10,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 1,12,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 2,13,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 3,15,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 4,17,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 5,18,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 6,19,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 7,21,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 8,23,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 9,25,dev_R,dev_I,cos(jp),sin(jp),l);
// EXTRA A-B INTERACTIONS (disabled)
/* hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 0,11,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 1,13,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 2,14,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 3,16,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 4,16,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 5,19,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 6,20,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 7,20,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 8,22,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 9,24,dev_R,dev_I,cos(jp),sin(jp),l); */
// couple the last qubit to chain A (qubit 6)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,6,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit except the last
for(int i=0;i<nqubits-1;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Variable model (special case, per original comment: for A=6, B=10): open
// chains A and B with hard-coded "variable" jp-bonds 0-14, 3-8, 4-15, 4-12,
// 5-6, 5-10; the last qubit couples to qubit 2 and receives no kick.
void modelVar(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
/* VARIABLE MODEL SPECIAL CASE
* * * * * * *
\ / / /
* * * *
\
* last qubit - not kicked
FOR A=6 B=10
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// evolution of chain A of size xlen
for(int i=0;i<xlen-1;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// evolution of chain B of size nqubits - xlen - 1
for(int i=0;i<nqubits-2-xlen;i++) {
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// variable A-B interaction (hard-coded pairs)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 0,14,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 3,8,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 4,15,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 4,12,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 5,6,dev_R,dev_I,cos(jp),sin(jp),l);
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 5,10,dev_R,dev_I,cos(jp),sin(jp),l);
// couple the last qubit to chain A (qubit 2)
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, nqubits-1,2,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit except the last
for(int i=0;i<nqubits-1;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
}
#endif | c837617d5ee525d24560bea24b8402aacbb3f996.cu | #ifndef MODEL
# define MODEL
# include "tools.cpp"
# include "cuda_functions.cu"
# include <math.h>
namespace model{
// Model 1: ring of the first nqubits-1 qubits with per-bond couplings js,
// plus an extra "last" qubit coupled to qubit 0 with strength j; then a
// single-qubit magnetic kick (Uk_kernel) on every qubit using the rows of b.
// dev_R/dev_I: real/imaginary parts of the 2^nqubits state vector on device.
// jp, extra, conA, conB are accepted for a uniform signature but unused here.
void model1(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int extra,itpp::ivec conA, itpp::ivec conB){
/* MODEL 1
*
* *
* * -- * last qubit
* *
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// nearest-neighbour couplings around the closed sub-ring of nqubits-1 qubits
for(int i=0;i<nqubits-1;i++) {
Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(nqubits-1),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// couple qubit 0 with the last qubit
Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 1.1: same as model 1 (ring of nqubits-1 qubits + last qubit on
// qubit 0) with one additional cross-link inside the chain between qubits
// nqubits-2 and 2, using coupling js(nqubits/2).
void model11(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int extra,itpp::ivec conA, itpp::ivec conB){
/* MODEL 1.1
*
* | *
* | * -- * last qubit
* | *
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
// cross interaction within the chain
Ui_kernel<<<numblocks,numthreads>>>(nqubits-2,2,dev_R,dev_I,cos(js(nqubits/2)),sin(js(nqubits/2)),l);
for(int i=0;i<nqubits-1;i++) {
Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(nqubits-1),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// couple qubit 0 with the last qubit
Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Model 2: ring of nqubits-1 qubits; the last qubit couples to qubits 0, 1
// and nqubits-2 (all with strength j); then kicks on every qubit.
void model2(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int extra,itpp::ivec conA, itpp::ivec conB){
/* MODEL 2
*
* * -
* * - * last qubit
* * -
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
for(int i=0;i<nqubits-1;i++) {
Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(nqubits-1),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
}
// couple the last qubit with qubits 0, 1 and the next-to-last qubit
Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,1,dev_R,dev_I,cos(j),sin(j),l);
Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,nqubits-2,dev_R,dev_I,cos(j),sin(j),l);
// magnetic-kick evolution on every qubit
for(int i=0;i<nqubits;i++) {
set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Closed homogeneous chain: couple every qubit to its successor (periodic)
// with uniform strength j, then apply the same kick field b to all qubits.
void chain(double *dev_R, double *dev_I, double j, itpp::vec b , int nqubits){
/* MODEL CHAIN CLOSED
*
* *
* *
* *
*
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
set_parameters(b,kcos,ksin,bx,by,bz);
for(int i=0;i<nqubits;i++) {
Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(nqubits),dev_R,dev_I,cos(j),sin(j),l);
}
for(int i=0;i<nqubits;i++) {
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// Open homogeneous chain: couple qubit i to i+1 (no wrap-around) with
// uniform strength j, then apply the same kick field b to all qubits.
void chain_open(double *dev_R, double *dev_I, double j, itpp::vec b, int nqubits){
/* MODEL CHAIN OPEN
* * * * * * * * *
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
choosenumblocks(l,numthreads,numblocks);
set_parameters(b,kcos,ksin,bx,by,bz);
for(int i=0;i<nqubits-1;i++) {
Ui_kernel<<<numblocks,numthreads>>>(i,i+1,dev_R,dev_I,cos(j),sin(j),l);
}
for(int i=0;i<nqubits;i++) {
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
// 2D lattice of width xlen with periodic wrap: each site couples to its
// right neighbour (same row) and to the site xlen below (mod nqubits),
// both with strength j; then the same kick b on every qubit.
// NOTE(review): the bond loop stops at nqubits-2, so site nqubits-1 never
// gets its own horizontal/vertical bonds — confirm whether that is intended.
void lattice(double *dev_R, double *dev_I, double j, itpp::vec b , int nqubits, int xlen){
/* MODEL LATTICE
* * * *
* * * *
* * * *
*/
int numthreads, numblocks;
double kcos,ksin,bx,by,bz;
int l=pow(2,nqubits);
int i_hor,i_ver;
choosenumblocks(l,numthreads,numblocks);
set_parameters(b,kcos,ksin,bx,by,bz);
for(int i=0;i<nqubits-1;i++) {
i_hor=(i+1)%xlen+(i/xlen)*xlen;
i_ver=(i+xlen)%nqubits;
Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,cos(j),sin(j),l);
Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,cos(j),sin(j),l);
}
for(int i=0;i<nqubits;i++) {
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
}
return;
}
void model3(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 3 CLOSED
   <- * * * * * * * ->
   /
   <- * * * * ->
   \
   * last qubit
   */
  // Two closed chains: A = sites 0..xlen-1, B = sites xlen..nqubits-2, joined
  // by one bond of strength jp, plus a probe qubit (site nqubits-1) coupled to
  // site 0 of chain A with strength j.  Each qubit i gets its own kick b.row(i).
  // conA/conB are currently unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Closed chain A of size xlen with site-dependent couplings js.
  for(int i=0;i<xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Closed chain B of size nqubits - xlen - 1.
  // NOTE(review): the coupling index here is js(i), i.e. chain B reuses chain
  // A's couplings; model3_open uses js(i+xlen) for the analogous bonds —
  // confirm which indexing is intended.
  for(int i=0;i<nqubits-1-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // A-B junction bond.
  Ui_kernel<<<numblocks,numthreads>>>(xlen-1,xlen,dev_R,dev_I,cos(jp),sin(jp),l);
  // Probe qubit coupled to site 0 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick, one field per qubit.
  for(int i=0;i<nqubits;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void model3_open(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 3 OPEN
   * * * * * * *
   /
   * * * *
   \
   * last qubit - not kicked
   */
  // Open-chain variant of model3: chains A and B are open, the last qubit is
  // coupled in but excluded from the kick loop.  conA/conB are unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // itpp::vec bC(3);
  // double kcosC,ksinC,bxC,byC,bzC;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Open chain A of size xlen.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Open chain B of size nqubits - xlen - 1; couplings indexed js(i+xlen).
  for(int i=0;i<nqubits-2-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i+xlen)),sin(js(i+xlen)),l);
  }
  // A-B junction bond.
  // NOTE(review): sites 2 and 10 are hard-coded (unlike model3's xlen-1/xlen);
  // this only matches the diagram for one specific geometry — confirm.
  Ui_kernel<<<numblocks,numthreads>>>(2,10,dev_R,dev_I,cos(jp),sin(jp),l);
  // Probe qubit coupled to hard-coded site 2 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,2,dev_R,dev_I,cos(j),sin(j),l);
  // INTERACCION MU: weak fixed coupling (0.01) of the probe to hard-coded
  // site 15 — NOTE(review): magic constants; confirm they match the intended
  // geometry for this experiment.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,15,dev_R,dev_I,cos(.01),sin(.01),l);
  // Magnetic kick on every qubit except the last (probe is not kicked).
  for(int i=0;i<nqubits-1;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  // bC(0)=(itpp::pi)/(2*std::sqrt(2)); bC(1)=0.; bC(2)=(itpp::pi)/(2*std::sqrt(2));
  // set_parameters(bC,kcosC,ksinC,bxC,byC,bzC);
  // Uk_kernel<<<numblocks,numthreads>>>(nqubits-1,dev_R,dev_I,bxC,byC,bzC,kcosC,ksinC,l);
  return;
}
void model4(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 4 CLOSED
   <- * * * * * * * ->
   \ / / /
   <- * * * *  ->
   \
   * last qubit
   */
  // Like model3 (two closed chains + probe qubit) but with three A-B bonds at
  // fixed offsets instead of a single junction.  conA/conB are unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Closed chain A of size xlen.
  for(int i=0;i<xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Closed chain B of size nqubits - xlen - 1.
  // NOTE(review): couplings indexed js(i), reusing chain A's values — compare
  // with model3_open which uses js(i+xlen); confirm intended.
  for(int i=0;i<nqubits-1-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Three A-B bonds at fixed offsets from the junction.
  Ui_kernel<<<numblocks,numthreads>>>(xlen-4,xlen,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(xlen-2,xlen+2,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(xlen-1,xlen+4,dev_R,dev_I,cos(jp),sin(jp),l);
  // Probe qubit coupled to site 0 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick, one field per qubit.
  for(int i=0;i<nqubits;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void model4_open(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 4 OPEN
   * * * * * * *
   \ / / /
   * * * *
   \
   * last qubit - not kicked
   */
  // Open-chain variant of model4: four A-B bonds computed from xlen/nqubits,
  // probe coupled to the middle of chain A and excluded from the kick loop.
  // conA/conB are unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Open chain A of size xlen.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Open chain B of size nqubits - xlen - 1.
  // NOTE(review): couplings indexed js(i) while model3_open uses js(i+xlen)
  // for chain B — confirm which indexing is intended.
  for(int i=0;i<nqubits-2-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Four A-B bonds derived from xlen and nqubits.
  Ui_kernel<<<numblocks,numthreads>>>(0,xlen,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(xlen/2-1,nqubits-xlen,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(xlen/2+1,nqubits-xlen+1,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(xlen-1,nqubits-2,dev_R,dev_I,cos(jp),sin(jp),l);
  // Probe qubit coupled to the middle of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,xlen/2,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick on every qubit except the (unkicked) probe.
  for(int i=0;i<nqubits-1;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void model5(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 5 CLOSED
   <- * * * * * * ->
   \ \ \ \ \ \
   <- * * * * * * ->
   \
   * last qubit
   */
  // Two closed chains rung-coupled 1-to-1 (ladder), plus a probe qubit
  // coupled to site 0 of chain A.  conA/conB are unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Closed chain A of size xlen.
  for(int i=0;i<xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Closed chain B of size nqubits - xlen - 1 (couplings reuse js(i)).
  for(int i=0;i<nqubits-1-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Rungs: site i of A coupled to site i of B, strength jp.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+xlen,dev_R,dev_I,cos(jp),sin(jp),l);
  }
  // Probe qubit coupled to site 0 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick, one field per qubit.
  for(int i=0;i<nqubits;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void model5_open(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 5 OPEN
   * * * * * *
   \ \ \ \ \ \
   * * * * * *
   \
   * last qubit
   */
  // Open-chain variant of model5 (ladder with open rails).  conA/conB unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Open chain A of size xlen.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Open chain B of size nqubits - xlen - 1 (couplings reuse js(i)).
  for(int i=0;i<nqubits-2-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Rungs: site i of A coupled to site i of B, strength jp.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+xlen,dev_R,dev_I,cos(jp),sin(jp),l);
  }
  // Probe qubit coupled to site 0 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick on every qubit.
  // NOTE(review): unlike the other *_open models this loop includes the last
  // (probe) qubit — confirm the probe is meant to be kicked here.
  for(int i=0;i<nqubits;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void model6(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 6 CLOSED
   <-* * * * * * ->
   \ / \ /
   <- * * * * * * ->
   \ \ / \
   <- * * * * * * ->
   \
   * last qubit
   */
  // NOTE(review): despite the three-chain diagram above, the body is byte-for-
  // byte identical to model5 (two rung-coupled closed chains + probe) —
  // confirm whether the third chain was ever wired up.  conA/conB are unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Closed chain A of size xlen.
  for(int i=0;i<xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%(xlen),dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Closed chain B of size nqubits - xlen - 1.
  for(int i=0;i<nqubits-1-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,(i+1)%(nqubits-1-xlen)+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Rungs: site i of A coupled to site i of B, strength jp.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+xlen,dev_R,dev_I,cos(jp),sin(jp),l);
  }
  // Probe qubit coupled to site 0 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,0,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick, one field per qubit.
  for(int i=0;i<nqubits;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void model7(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 7 CASO ESPECIAL
   * * * * * * *
   \ / / /
   * * * *
   \
   * last qubit - not kicked
   PARA A=6 B=10
   */
  // Special-case geometry with a hand-wired A-B bond table; only valid for
  // xlen = 6 and chain B = 10 sites (see "PARA A=6 B=10" above).
  // conA/conB are unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Open chain A of size xlen.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Open chain B of size nqubits - xlen - 1 (couplings reuse js(i)).
  for(int i=0;i<nqubits-2-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Hand-wired A-B bond table for the A=6 / B=10 layout.
  Ui_kernel<<<numblocks,numthreads>>>(0,6,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(1,7,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(2,9,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(3,10,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(4,13,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(5,15,dev_R,dev_I,cos(jp),sin(jp),l);
  //CONEXIONES EXTRA A B
  //Ui_kernel<<<numblocks,numthreads>>>(0,7,dev_R,dev_I,cos(jp),sin(jp),l);
  //Ui_kernel<<<numblocks,numthreads>>>(1,8,dev_R,dev_I,cos(jp),sin(jp),l);
  //Ui_kernel<<<numblocks,numthreads>>>(2,10,dev_R,dev_I,cos(jp),sin(jp),l);
  //Ui_kernel<<<numblocks,numthreads>>>(3,11,dev_R,dev_I,cos(jp),sin(jp),l);
  //Ui_kernel<<<numblocks,numthreads>>>(4,12,dev_R,dev_I,cos(jp),sin(jp),l);
  // NOTE(review): this (5,14) bond sits inside the "CONEXIONES EXTRA" group
  // but is the only one left active — confirm it is intentional.
  Ui_kernel<<<numblocks,numthreads>>>(5,14,dev_R,dev_I,cos(jp),sin(jp),l);
  // Probe qubit coupled to hard-coded site 2 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,2,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick on every qubit except the (unkicked) probe.
  for(int i=0;i<nqubits-1;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void model8(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL 8 CASO ESPECIAL
   * * * * * * *
   \ / / /
   * * * *
   \
   * last qubit - not kicked
   PARA A=10 B=15
   */
  // Special-case geometry with a hand-wired A-B bond table; only valid for
  // xlen = 10 and chain B = 15 sites (see "PARA A=10 B=15" above).
  // conA/conB are unused.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Open chain A of size xlen.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Open chain B of size nqubits - xlen - 1 (couplings reuse js(i)).
  for(int i=0;i<nqubits-2-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Hand-wired A-B bond table for the A=10 / B=15 layout.
  Ui_kernel<<<numblocks,numthreads>>>(0,10,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(1,12,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(2,13,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(3,15,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(4,17,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(5,18,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(6,19,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(7,21,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(8,23,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(9,25,dev_R,dev_I,cos(jp),sin(jp),l);
  //INTERACCIONES EXTRA A B
  /* Ui_kernel<<<numblocks,numthreads>>>(0,11,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(1,13,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(2,14,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(3,16,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(4,16,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(5,19,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(6,20,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(7,20,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(8,22,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(9,24,dev_R,dev_I,cos(jp),sin(jp),l); */
  // Probe qubit coupled to hard-coded site 6 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,6,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick on every qubit except the (unkicked) probe.
  for(int i=0;i<nqubits-1;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
void modelVar(double *dev_R, double *dev_I, itpp::vec js, double j, double jp, itpp::mat b , int nqubits, int xlen,itpp::ivec conA, itpp::ivec conB){
  /* MODEL VARIABLE CASO ESPECIAL
   * * * * * * *
   \ / / /
   * * * *
   \
   * last qubit - not kicked
   PARA A=6 B=10
   */
  // "Variable" special case: same open-chain backbone as model7 but with a
  // different hand-wired A-B bond table (valid for A=6 / B=10 only).
  // NOTE(review): conA/conB are unused, although for a "variable" model one
  // would expect the bond table to come from them — confirm.
  int numthreads, numblocks;
  double kcos,ksin,bx,by,bz;
  // Exact integer 2^nqubits (pow() returned a double that was truncated).
  int l = 1 << nqubits;
  choosenumblocks(l,numthreads,numblocks);
  // Open chain A of size xlen.
  for(int i=0;i<xlen-1;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i,i+1,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Open chain B of size nqubits - xlen - 1 (couplings reuse js(i)).
  for(int i=0;i<nqubits-2-xlen;i++) {
    Ui_kernel<<<numblocks,numthreads>>>(i+xlen,i+1+xlen,dev_R,dev_I,cos(js(i)),sin(js(i)),l);
  }
  // Hand-wired "variable" A-B bond table.
  Ui_kernel<<<numblocks,numthreads>>>(0,14,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(3,8,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(4,15,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(4,12,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(5,6,dev_R,dev_I,cos(jp),sin(jp),l);
  Ui_kernel<<<numblocks,numthreads>>>(5,10,dev_R,dev_I,cos(jp),sin(jp),l);
  // Probe qubit coupled to hard-coded site 2 of chain A.
  Ui_kernel<<<numblocks,numthreads>>>(nqubits-1,2,dev_R,dev_I,cos(j),sin(j),l);
  // Magnetic kick on every qubit except the (unkicked) probe.
  for(int i=0;i<nqubits-1;i++) {
    set_parameters(b.get_row(i),kcos,ksin,bx,by,bz);
    Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx,by,bz,kcos,ksin,l);
  }
  return;
}
}
#endif |
7b884bb8cb5919c8e9c5dd8ebfee68871245c0d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
// ---------------------------------------------------------------------------
// Generic fallbacks for the MAGMA wrappers.  Each primary template only
// raises: the float/double specializations further down dispatch to the real
// MAGMA routines, so calling one of these with any other scalar type is a
// deliberate user-facing type error, not a missing implementation.
// ---------------------------------------------------------------------------
// Single-matrix LU solve (gesv): writes one status code through `info`.
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("solve only takes float or double Tensors");
}
// Batched LU solve: one status per matrix in dinfo_array.
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
AT_ERROR("solve only takes float or double Tensors");
}
// LU factorization with partial pivoting (getrf), single matrix.
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
AT_ERROR("lu only takes float or double Tensors");
}
// Batched LU factorization with partial pivoting.
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("lu only takes float or double Tensors");
}
// LU factorization without pivoting, single matrix.
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info) {
AT_ERROR("lu only takes float or double Tensors");
}
// Batched LU factorization without pivoting.
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("lu only takes float or double Tensors");
}
// Batched matrix inverse from LU factors (getri, out-of-place).
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("getri only takes float or double Tensors");
}
// Solve with a precomputed Cholesky factor (potrs), single matrix.
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
// Batched Cholesky solve; note `info` is a reference (batched potrs returns
// a single status), unlike the pointer in the single-matrix variant.
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
// Cholesky factorization (potrf), single matrix.
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info) {
AT_ERROR("cholesky only takes float or double Tensors");
}
// Batched Cholesky factorization.
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky only takes float or double Tensors");
}
// Triangular solve (trsm), single matrix.
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb) {
AT_ERROR("triangular_solve only takes float or double Tensors");
}
// Batched triangular solve.
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("triangular_solve only takes float or double Tensors");
}
// ---------------------------------------------------------------------------
// float/double specializations: thin 1:1 dispatch to the MAGMA C API
// (d* = double, s* = float).  All buffers are device pointers; batched
// variants run on the MAGMAQueue's stream.
// ---------------------------------------------------------------------------
// -- gesv: LU-based linear solve --------------------------------------------
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
// -- getrf: LU factorization (with and without pivoting) --------------------
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
// -- getri: inverse from LU factors (out-of-place, batched only) ------------
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
// -- potrs: solve from a Cholesky factor ------------------------------------
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
// Batched potrs returns its status as the function result, hence the
// assignment into the `info` reference rather than an info array.
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
// -- potrf: Cholesky factorization ------------------------------------------
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
magma_spotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
// -- trsm: triangular solve (alpha fixed to 1, left-side solve) -------------
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
#endif
// Allocates `size` elements of `type` in pinned (page-locked) host memory via
// pin_memory and stores the raw pointer in `name`.  The owning storage is a
// local named storage_##name, so the buffer lives until the end of the
// enclosing scope; `dummy_tensor` supplies the allocator context.
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
auto storage_##name = pin_memory<type>(size, dummy_tensor); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B in place via MAGMA's LU solver (gesv).  On entry `b` holds B
// and `A` the coefficient matrix; on exit `b` holds the solution and `A` its
// LU factors.  One MAGMA status code per matrix is written into `infos`.
// Both tensors are expected in batched column-major layout (see the
// cloneBatchedColumnMajor calls in _solve_helper_cuda).
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// Unbatched (2-D) case: a single gesv call with one pivot buffer.
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
// Batched case: build host-pinned arrays of per-matrix device pointers
// (A, B, pivots) plus a per-matrix info array for the batched gesv.
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
// Pinned allocations; freed automatically at the end of this scope.
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
magmaSolveBatched<scalar_t>(
n, nrhs, A_array, n, ipiv_array, b_array, n,
info_array, batch_size, magma_queue);
// Copy per-matrix status codes back for the caller's error check.
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
// CUDA entry point for at::solve: factors A and solves A X = self via MAGMA.
// Works on column-major clones so the caller's tensors are untouched, then
// checks the per-matrix (batched) or single (2-D) status codes.
// Returns (solution, LU factors of A).
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto lhs_copy = cloneBatchedColumnMajor(A);
  std::vector<int64_t> status(batchCount(self), 0);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(rhs_copy, lhs_copy, status);
  });
  if (self.dim() > 2) {
    batchCheckErrors(status, "solve_cuda");
  } else {
    singleCheckErrors(status[0], "solve_cuda");
  }
  return std::tuple<Tensor, Tensor>(rhs_copy, lhs_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the batched matrix inverse: LU-factorizes `self` in place, then
// writes the inverse into `self_inv` with MAGMA's out-of-place getri.
// One status per matrix ends up in `infos`.  Always uses the batched path
// (callers pass batched column-major tensors).
template <typename scalar_t>
static void apply_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// Host-pinned arrays of per-matrix device pointers for the batched calls.
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
// Step 1: LU factorization of every matrix in the batch.
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
// Step 2: inverse from the LU factors.
// NOTE(review): info_array is reused here, so a nonzero LU status is
// overwritten by the getri status — confirm getri reports singular
// matrices consistently so no failure is masked.
magmaGetriBatched<scalar_t>(
n, self_array, n, ipiv_array, self_inv_array,
n, info_array, batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
// Because this is out-of-place inverse, the predefined macros will
// not work
// CUDA entry point for at::inverse: batched LU + getri via MAGMA.
// Operates on column-major clones (out-of-place), checks every per-matrix
// status, and returns the inverse.
Tensor _inverse_helper_cuda(const Tensor& self) {
  std::vector<int64_t> status(batchCount(self), 0);
  auto lu_copy = cloneBatchedColumnMajor(self);
  auto inv_result = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
    apply_inverse<scalar_t>(lu_copy, inv_result, status);
  });
  batchCheckErrors(status, "inverse_cuda");
  return inv_result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B given a precomputed Cholesky factor of A (potrs).  `A` holds
// the factor (upper or lower per `upper`), `b` holds B on entry and the
// solution on exit.  A single status code is written to `info` (batched potrs
// returns only one status for the whole batch).
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp;
// Unbatched (2-D) case: single potrs call.
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
// Batched case: host-pinned arrays of per-matrix device pointers.
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array, n, b_array, n,
info_tmp, batch_size, magma_queue);
info = info_tmp;
}
#endif
}
// CUDA entry point for at::cholesky_solve: solves A X = self from a
// precomputed Cholesky factor via MAGMA potrs, on column-major clones so
// caller tensors are untouched.  Raises if MAGMA reports a bad argument.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  int64_t status = 0;
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto factor_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs_copy, factor_copy, upper, status);
  });
  AT_CHECK(status == 0, "MAGMA cholesky_solve : invalid argument: ", -status);
  return rhs_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// In-place Cholesky factorization of each matrix in `self` via MAGMA.
// `upper` selects the triangle that is read/written; one status code per
// matrix is written into `infos` (0 means success).
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto self_data = self.data<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

  if (self.dim() == 2) {
    // Single matrix: non-batched MAGMA call.
    magma_int_t info = 0;
    magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
    infos[0] = info;
  } else {
    auto self_mat_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    magma_int_t* info_array;
    scalar_t** self_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_mat_stride];
    }

    MAGMAQueue magma_queue(self.get_device());
    magmaCholeskyBatched<scalar_t>(
      uplo, n, self_array, n, info_array,
      batch_size, magma_queue);

    // Copy the per-matrix status codes out of the pinned buffer.
    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// Computes the Cholesky factor of `self` on the GPU. The factorization is
// always performed on the lower triangle here; an upper factor is obtained by
// transposing the input before and the result after the decomposition.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);
  // Work on the (possibly transposed) input in column-major layout.
  Tensor working_copy = upper
      ? cloneBatchedColumnMajor(self.transpose(-1, -2))
      : cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
    // `false`: the decomposition itself always uses the lower triangle.
    apply_cholesky<scalar_t>(working_copy, false, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "cholesky_cuda");
  } else {
    singleCheckErrors(infos[0], "cholesky_cuda");
  }
  return upper ? working_copy.transpose(-1, -2) : working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// In-place LU factorization of `self`. When `get_pivots` is true, partial
// pivoting is used and pivot indices are written into `pivots`; otherwise the
// no-pivoting variant is used. Status codes are written into `infos`.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-1), "n");

  if (self.dim() == 2) {
    // If `pivots` is defined, then we have to compute them.
    // We will use the normal getrf function to compute the LU factorization
    // and the pivots

    // We create temporary tensors on the CPU, because tensors on the GPU
    // cause segfault when passed to magmaLu and magmaLuNoPiv. The data is later
    // copied to the appropriate tensors.
    Tensor info_tmp = at::zeros({}, at::kInt);
    if (get_pivots) {
      Tensor piv_tmp = at::empty({n}, at::kInt);
      magmaLu<scalar_t>(
        n, n, self_data, n, piv_tmp.data<magma_int_t>(), info_tmp.data<magma_int_t>());
      pivots.copy_(piv_tmp);
    } else {
      magmaLuNoPiv<scalar_t>(n, n, self_data, n, info_tmp.data<magma_int_t>());
    }
    infos.copy_(info_tmp);
  } else {
    auto self_matrix_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    // Pointer array (one entry per matrix) for MAGMA's batched API.
    scalar_t** self_array;
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_matrix_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    // Same comment as in the case of single matrix above.
    if (get_pivots) {
      auto pivots_data = pivots.data<magma_int_t>();
      auto pivots_matrix_stride = pivots.size(-1);
      magma_int_t** pivots_array;
      ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size, pivots);
      for (int64_t i = 0; i < batch_size; i++) {
        pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
      }
      magmaLuBatched<scalar_t>(
        n, n, self_array, n, pivots_array,
        infos.data<magma_int_t>(), batch_size, magma_queue);
    } else {
      magmaLuNoPivBatched<scalar_t>(
        n, n, self_array, n, infos.data<magma_int_t>(),
        batch_size, magma_queue);
    }
  }
#endif
}
// Computes the LU factorization of `self` (with partial pivoting when `pivot`
// is set), returning (factorization, pivots, per-matrix info codes). When
// `check_errors` is set, failures raise immediately instead of being left in
// the info tensor.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  AT_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  squareCheckInputs(self);
  auto req_size = self.sizes().vec();
  // Pivots are shaped like `self` minus the last matrix dimension ...
  req_size.pop_back();
  Tensor pivots_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
  // ... and infos hold one entry per matrix (minus both matrix dimensions).
  req_size.pop_back();
  auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));

  Tensor self_working_copy;
  if (self.numel() == 0) {
    // Nothing to factorize for an empty tensor.
    self_working_copy = at::empty_like(self);
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
      apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu");
    } else {
      batchCheckErrors(infos_tensor, "lu");
    }
  }
  return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Writes triu(self, k) (upper == true) or tril(self, k) (upper == false) into
// `result`. Launch layout: grid.x * block.x covers the N elements of one
// matrix, grid.y indexes the batch. All strides are in elements, not bytes.
template <typename scalar_t, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void triu_tril_kernel(
  scalar_t* result, scalar_t* self, int64_t k, int64_t N,
  int64_t res_batch_stride, int64_t res_row_stride, int64_t res_col_stride,
  int64_t self_batch_stride, int64_t self_row_stride, int64_t self_col_stride, int64_t self_ncol) {
  // Promote to 64-bit BEFORE multiplying: blockIdx.x * blockDim.x is
  // evaluated in 32-bit unsigned arithmetic and can wrap for matrices with
  // more than 2^32 elements, even though the destination is int64_t.
  int64_t linear_idx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (linear_idx >= N) {
    return;
  }

  int64_t self_batch_idx = blockIdx.y;
  int64_t row = linear_idx / self_ncol;
  int64_t col = linear_idx % self_ncol;

  // Keep (row, col) when it lies on the requested side of the k-th diagonal,
  // zero it otherwise.
  bool mask = upper ? (col - row >= k) : (col - row <= k);

  // Now compute the offset for the self and result tensor
  int64_t res_offset = self_batch_idx * res_batch_stride + row * res_row_stride + col * res_col_stride;
  int64_t self_offset = self_batch_idx * self_batch_stride + row * self_row_stride + col * self_col_stride;
  result[res_offset] = mask ? self[self_offset] : scalar_t(0);
}
// Launches triu_tril_kernel over all batches of `self`, writing into
// `result`. grid.y carries the batch index; grid.x tiles the elements of a
// single matrix. The kernel indexes both tensors via explicit strides, so
// `result` and `self` may have different layouts.
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
  // Batch stride falls back to 1 for plain 2-D tensors (grid.y is then 1).
  int64_t n_batches = batchCount(self), mat_size = self.size(-1) * self.size(-2),
          res_batch_stride = result.dim() > 2 ? result.stride(-3) : 1,
          res_row_stride = result.stride(-2), res_col_stride = result.stride(-1),
          self_batch_stride = self.dim() > 2 ? self.stride(-3) : 1,
          self_row_stride = self.stride(-2), self_col_stride = self.stride(-1);
  dim3 dim_block = cuda::getApplyBlock();
  // Ceil-divide so a partial final block covers the tail of the matrix.
  dim3 dim_grid((mat_size + dim_block.x - 1) / dim_block.x, n_batches);
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), name, [&]{
    hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, upper>)
      , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        result.data<scalar_t>(), self.data<scalar_t>(), k, mat_size,
        res_batch_stride, res_row_stride, res_col_stride,
        self_batch_stride, self_row_stride, self_col_stride, self.size(-1));
  });
  // Surface launch-configuration errors from the async kernel launch.
  AT_CUDA_CHECK(hipGetLastError());
  return result;
}
// In-place lower-triangular masking. When `self` is batch-contiguous the
// kernel can write straight into it; otherwise compute into a scratch tensor
// and copy the result back.
Tensor& tril_cuda_(Tensor &self, int64_t k) {
  const bool writable_in_place = checkTrilTriuBatchContiguous(self);
  Tensor source = writable_in_place ? self : self.contiguous();
  Tensor dest = writable_in_place ? self : at::empty_like(self);
  tril_cuda_out(dest, source, k);
  if (!writable_in_place) {
    self.copy_(dest);
  }
  return self;
}
// Writes tril(self, k) into `result`, resizing `result` first if the shapes
// do not match.
Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  // Empty tensors: nothing to mask.
  if (self.numel() == 0) {
    return result;
  }
  // The kernel requires batch-contiguous input; fall back to a contiguous copy.
  Tensor source = checkTrilTriuBatchContiguous(self) ? self : self.contiguous();
  return triu_tril_cuda_template<false>(result, source, k, "tril");
}
// In-place upper-triangular masking. When `self` is batch-contiguous the
// kernel can write straight into it; otherwise compute into a scratch tensor
// and copy the result back.
Tensor& triu_cuda_(Tensor &self, int64_t k) {
  const bool writable_in_place = checkTrilTriuBatchContiguous(self);
  Tensor source = writable_in_place ? self : self.contiguous();
  Tensor dest = writable_in_place ? self : at::empty_like(self);
  triu_cuda_out(dest, source, k);
  if (!writable_in_place) {
    self.copy_(dest);
  }
  return self;
}
// Writes triu(self, k) into `result`, resizing `result` first if the shapes
// do not match.
Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  // Empty tensors: nothing to mask.
  if (self.numel() == 0) {
    return result;
  }
  // The kernel requires batch-contiguous input; fall back to a contiguous copy.
  Tensor source = checkTrilTriuBatchContiguous(self) ? self : self.contiguous();
  return triu_tril_cuda_template<true>(result, source, k, "triu");
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves op(A) X = B in place in `b`, where A is triangular. `upper` selects
// the referenced triangle of A, `transpose` selects op(A) = A^T, and
// `unitriangular` tells MAGMA to assume an implicit unit diagonal.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
// Fixed copy-paste bug: the message previously said "cholesky_solve".
AT_ERROR("triangular_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
  magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;

  auto A_data = A.data<scalar_t>();
  auto b_data = b.data<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  if (b.dim() == 2) {
    // Single matrix: non-batched trsm.
    magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    // Pointer arrays (one entry per matrix) for MAGMA's batched API.
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    MAGMAQueue magma_queue(b.get_device());
    magmaTriangularSolveBatched<scalar_t>(
        uplo, trans, diag, n, nrhs, A_array, n,
        b_array, n, batch_size, magma_queue);
  }
#endif
}
// Dispatches the triangular solve over floating dtypes; returns the solution
// together with a column-major copy of A.
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
                                                         bool upper, bool transpose, bool unitriangular) {
  auto b_copy = cloneBatchedColumnMajor(self);
  auto A_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(b_copy, A_copy, upper, transpose, unitriangular);
  });
  return std::tuple<Tensor, Tensor>(b_copy, A_copy);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
// Generic fallbacks for the MAGMA wrappers: each routine is explicitly
// specialized for float and double below, so instantiating any of these
// primary templates (i.e. calling with an unsupported dtype) raises a
// runtime error.
template<class scalar_t>
void magmaSolve(
    magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
    magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
  AT_ERROR("solve only takes float or double Tensors");
}

template<class scalar_t>
void magmaSolveBatched(
    magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  AT_ERROR("solve only takes float or double Tensors");
}

template<class scalar_t>
void magmaLu(
    magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  AT_ERROR("lu only takes float or double Tensors");
}

template<class scalar_t>
void magmaLuBatched(
    magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  AT_ERROR("lu only takes float or double Tensors");
}

template<class scalar_t>
void magmaLuNoPiv(
    magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
    magma_int_t* info) {
  AT_ERROR("lu only takes float or double Tensors");
}

template<class scalar_t>
void magmaLuNoPivBatched(
    magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  AT_ERROR("lu only takes float or double Tensors");
}

template<class scalar_t>
void magmaGetriBatched(
    magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  AT_ERROR("getri only takes float or double Tensors");
}

template<class scalar_t>
void magmaCholeskySolve(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
    scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
  AT_ERROR("cholesky_solve only takes float or double Tensors");
}

template<class scalar_t>
void magmaCholeskySolveBatched(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
    scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  AT_ERROR("cholesky_solve only takes float or double Tensors");
}

template<class scalar_t>
void magmaCholesky(
    magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
    magma_int_t ldda, magma_int_t* info) {
  AT_ERROR("cholesky only takes float or double Tensors");
}

template<class scalar_t>
void magmaCholeskyBatched(
    magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  AT_ERROR("cholesky only takes float or double Tensors");
}

template<class scalar_t>
void magmaTriangularSolve(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb) {
  AT_ERROR("triangular_solve only takes float or double Tensors");
}

template<class scalar_t>
void magmaTriangularSolveBatched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  AT_ERROR("triangular_solve only takes float or double Tensors");
}
// float/double specializations forwarding to MAGMA's gesv (LU-based linear
// system solver), single and batched variants.
template<>
void magmaSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
  magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}

template<>
void magmaSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
  magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}

template<>
void magmaSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}

template<>
void magmaSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue())
;
}
// float/double specializations forwarding to MAGMA's getrf (LU factorization),
// in pivoted / non-pivoted and single / batched variants.
template<>
void magmaLu<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
}

template<>
void magmaLu<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
}

template<>
void magmaLuBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}

template<>
void magmaLuBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}

template<>
void magmaLuNoPiv<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* info) {
  magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
}

template<>
void magmaLuNoPiv<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* info) {
  magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
}

template<>
void magmaLuNoPivBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}

template<>
void magmaLuNoPivBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
// float/double specializations forwarding to MAGMA's out-of-place batched
// getri (matrix inverse from an LU factorization).
template<>
void magmaGetriBatched<double>(
    magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}

template<>
void magmaGetriBatched<float>(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
// float/double specializations forwarding to MAGMA's potrs (Cholesky solve)
// and potrf (Cholesky factorization), single and batched variants. Note the
// batched potrs returns its status instead of writing through a pointer.
template<>
void magmaCholeskySolve<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}

template<>
void magmaCholeskySolve<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}

template<>
void magmaCholeskySolveBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}

template<>
void magmaCholeskySolveBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}

template<>
void magmaCholesky<double>(
    magma_uplo_t uplo, magma_int_t n, double* dA,
    magma_int_t ldda, magma_int_t* info) {
  magma_dpotrf_gpu(uplo, n, dA, ldda, info);
}

template<>
void magmaCholesky<float>(
    magma_uplo_t uplo, magma_int_t n, float* dA,
    magma_int_t ldda, magma_int_t* info) {
  magma_spotrf_gpu(uplo, n, dA, ldda, info);
}

template<>
void magmaCholeskyBatched<double>(
    magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}

template<>
void magmaCholeskyBatched<float>(
    magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
// float/double specializations forwarding to MAGMA's trsm (triangular solve
// from the left with unit alpha), single and batched variants.
template<>
void magmaTriangularSolve<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
  magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}

template<>
void magmaTriangularSolve<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
  magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}

template<>
void magmaTriangularSolveBatched<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}

template<>
void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
#endif
// Allocates `size` elements of `type` via the pin_memory helper and binds
// `name` to the buffer; the backing storage (`storage_<name>`) lives until
// the end of the enclosing scope. `dummy_tensor` supplies the allocator
// context — presumably the device/allocator association; TODO confirm.
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
  auto storage_##name = pin_memory<type>(size, dummy_tensor); \
  name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B via LU factorization, overwriting `b` with the solution and
// `A` with its LU factors. One MAGMA status code per matrix is written into
// `infos` (0 means success).
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data<scalar_t>();
  auto b_data = b.data<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  if (b.dim() == 2) {
    // Single matrix: non-batched gesv with a CPU pivot buffer.
    auto ipiv = at::empty({n}, at::kInt);
    magma_int_t info = 0;
    magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data<magma_int_t>(),
                        b_data, n, &info);
    infos[0] = info;
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    // Pinned scratch buffers for MAGMA's batched pointer-array interface.
    magma_int_t* info_array;
    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }

    MAGMAQueue magma_queue(b.get_device());
    magmaSolveBatched<scalar_t>(
        n, nrhs, A_array, n, ipiv_array, b_array, n,
        info_array, batch_size, magma_queue);

    // Copy per-matrix status codes out of the pinned buffer.
    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// Solves AX = B on the GPU; returns (solution, LU factorization of A).
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  // Column-major working copies, as required by MAGMA.
  auto b_copy = cloneBatchedColumnMajor(self);
  auto A_copy = cloneBatchedColumnMajor(A);
  std::vector<int64_t> infos(batchCount(self), 0);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(b_copy, A_copy, infos);
  });
  // Batched inputs get per-matrix error reporting.
  if (self.dim() > 2) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos[0], "solve_cuda");
  }
  return std::tuple<Tensor, Tensor>(b_copy, A_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Out-of-place batched inverse: LU-factorizes `self` in place, then writes
// the inverse into `self_inv`. One status code per matrix goes into `infos`.
// Note: `info_array` is reused by both MAGMA calls, so the values copied out
// at the end come from the getri (inversion) step.
template <typename scalar_t>
static void apply_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

  // Pinned scratch buffers for MAGMA's batched pointer-array interface.
  magma_int_t* info_array;
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;

  ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv);

  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(self.get_device());
  magmaLuBatched<scalar_t>(
    n, n, self_array, n, ipiv_array, info_array,
    batch_size, magma_queue);

  magmaGetriBatched<scalar_t>(
    n, self_array, n, ipiv_array, self_inv_array,
    n, info_array, batch_size, magma_queue);

  for (int64_t i = 0; i < batch_size; i++) {
    infos[i] = info_array[i];
  }
#endif
}
// Out-of-place batched inverse: the LU factorization is done in a scratch
// copy of `self`, and the inverse is written into a second column-major copy,
// which is what gets returned. (Being out-of-place is why the predefined
// dispatch macros are not used here.)
Tensor _inverse_helper_cuda(const Tensor& self) {
  // One MAGMA status code per matrix in the batch.
  std::vector<int64_t> infos(batchCount(self), 0);
  // MAGMA expects column-major storage, hence the batched clones.
  auto lu_copy = cloneBatchedColumnMajor(self);
  auto inv_copy = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
    apply_inverse<scalar_t>(lu_copy, inv_copy, infos);
  });
  batchCheckErrors(infos, "inverse_cuda");
  return inv_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B for X given a precomputed Cholesky factorization of A (stored
// in `A`), overwriting `b` with the solution. `upper` selects which triangle
// of `A` holds the factor; `info` receives the MAGMA status code (0 == success).
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto A_data = A.data<scalar_t>();
  auto b_data = b.data<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  int info_tmp;
  if (b.dim() == 2) {
    // Single matrix: use the non-batched MAGMA routine.
    magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
                                 b_data, n, &info_tmp);
    info = info_tmp;
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    // Pointer arrays (one entry per matrix) that MAGMA's batched API takes.
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    MAGMAQueue magma_queue(b.get_device());
    magmaCholeskySolveBatched<scalar_t>(
        uplo, n, nrhs, A_array, n, b_array, n,
        info_tmp, batch_size, magma_queue);

    info = info_tmp;
  }
#endif
}
// Dispatches apply_cholesky_solve over floating dtypes and raises on a
// non-zero MAGMA status code. Returns the solution tensor.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  int64_t info = 0;
  // Column-major working copies, as required by MAGMA.
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
  });
  AT_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// In-place Cholesky factorization of each matrix in `self` via MAGMA.
// `upper` selects the triangle that is read/written; one status code per
// matrix is written into `infos` (0 means success).
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto self_data = self.data<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

  if (self.dim() == 2) {
    // Single matrix: non-batched MAGMA call.
    magma_int_t info = 0;
    magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
    infos[0] = info;
  } else {
    auto self_mat_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    magma_int_t* info_array;
    scalar_t** self_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_mat_stride];
    }

    MAGMAQueue magma_queue(self.get_device());
    magmaCholeskyBatched<scalar_t>(
      uplo, n, self_array, n, info_array,
      batch_size, magma_queue);

    // Copy the per-matrix status codes out of the pinned buffer.
    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// Computes the Cholesky factor of `self` on the GPU. The decomposition itself
// is always done on the lower triangle (note the hard-coded `false` below);
// an upper factor is obtained by transposing the input before and the result
// after the factorization.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);
  Tensor self_working_copy;
  if (upper) {
    self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
  }

  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
    apply_cholesky<scalar_t>(self_working_copy, false, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "cholesky_cuda");
  } else {
    singleCheckErrors(infos[0], "cholesky_cuda");
  }
  if (upper) {
    // Undo the pre-transposition so the factor comes back upper-triangular.
    return self_working_copy.transpose(-1, -2);
  } else {
    return self_working_copy;
  }
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// In-place LU factorization of `self`. When `get_pivots` is true, partial
// pivoting is used and pivot indices are written into `pivots`; otherwise the
// no-pivoting variant is used. Status codes are written into `infos`.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-1), "n");

  if (self.dim() == 2) {
    // If `pivots` is defined, then we have to compute them.
    // We will use the normal getrf function to compute the LU factorization
    // and the pivots

    // We create temporary tensors on the CPU, because tensors on the GPU
    // cause segfault when passed to magmaLu and magmaLuNoPiv. The data is later
    // copied to the appropriate tensors.
    Tensor info_tmp = at::zeros({}, at::kInt);
    if (get_pivots) {
      Tensor piv_tmp = at::empty({n}, at::kInt);
      magmaLu<scalar_t>(
        n, n, self_data, n, piv_tmp.data<magma_int_t>(), info_tmp.data<magma_int_t>());
      pivots.copy_(piv_tmp);
    } else {
      magmaLuNoPiv<scalar_t>(n, n, self_data, n, info_tmp.data<magma_int_t>());
    }
    infos.copy_(info_tmp);
  } else {
    auto self_matrix_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    // Pointer array (one entry per matrix) for MAGMA's batched API.
    scalar_t** self_array;
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_matrix_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    // Same comment as in the case of single matrix above.
    if (get_pivots) {
      auto pivots_data = pivots.data<magma_int_t>();
      auto pivots_matrix_stride = pivots.size(-1);
      magma_int_t** pivots_array;
      ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size, pivots);
      for (int64_t i = 0; i < batch_size; i++) {
        pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
      }
      magmaLuBatched<scalar_t>(
        n, n, self_array, n, pivots_array,
        infos.data<magma_int_t>(), batch_size, magma_queue);
    } else {
      magmaLuNoPivBatched<scalar_t>(
        n, n, self_array, n, infos.data<magma_int_t>(),
        batch_size, magma_queue);
    }
  }
#endif
}
// CUDA entry point for `_lu_with_info`: returns (factorization, pivots, infos)
// for a square, possibly batched, input. `pivot` selects pivoted vs. pivot-free
// LU; `check_errors` raises immediately on a nonzero MAGMA info code.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
AT_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
squareCheckInputs(self);
// Pivots get shape self.shape[:-1]; infos get shape self.shape[:-2].
auto req_size = self.sizes().vec();
req_size.pop_back();
Tensor pivots_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
// Nothing to factor; return an empty result of the right shape.
self_working_copy = at::empty_like(self);
} else {
// MAGMA expects column-major (Fortran) storage, so factor a clone.
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu");
} else {
batchCheckErrors(infos_tensor, "lu");
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Copies `self` into `result`, zeroing elements outside the requested
// triangle: `upper` keeps elements with col - row >= k (triu), otherwise
// col - row <= k (tril).
// Launch layout: grid.x * block.x covers the N elements of one matrix,
// grid.y indexes the batch; explicit strides support non-contiguous tensors.
template <typename scalar_t, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void triu_tril_kernel(
scalar_t* result, scalar_t* self, int64_t k, int64_t N,
int64_t res_batch_stride, int64_t res_row_stride, int64_t res_col_stride,
int64_t self_batch_stride, int64_t self_row_stride, int64_t self_col_stride, int64_t self_ncol) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
// Guard the ragged tail of the rounded-up grid.
if (linear_idx >= N) {
return;
}
int64_t self_batch_idx = blockIdx.y;
int64_t row = linear_idx / self_ncol;
int64_t col = linear_idx % self_ncol;
bool mask = upper ? (col - row >= k) : (col - row <= k);
// Now compute the offset for the self and result tensor
int64_t res_offset = self_batch_idx * res_batch_stride + row * res_row_stride + col * res_col_stride;
int64_t self_offset = self_batch_idx * self_batch_stride + row * self_row_stride + col * self_col_stride;
result[res_offset] = mask ? self[self_offset] : scalar_t(0);
}
// Host launcher shared by triu/tril: computes batch/row/column strides for
// both tensors, launches triu_tril_kernel with a (elements-per-matrix, batch)
// grid, and returns `result`. `name` is only used for dispatch diagnostics.
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
// Tensors with dim <= 2 have no batch stride; use 1 as a harmless dummy
// (blockIdx.y is always 0 in that case).
int64_t n_batches = batchCount(self), mat_size = self.size(-1) * self.size(-2),
res_batch_stride = result.dim() > 2 ? result.stride(-3) : 1,
res_row_stride = result.stride(-2), res_col_stride = result.stride(-1),
self_batch_stride = self.dim() > 2 ? self.stride(-3) : 1,
self_row_stride = self.stride(-2), self_col_stride = self.stride(-1);
dim3 dim_block = cuda::getApplyBlock();
// Ceil-divide so the grid covers every element of one matrix.
dim3 dim_grid((mat_size + dim_block.x - 1) / dim_block.x, n_batches);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), name, [&]{
triu_tril_kernel<scalar_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data<scalar_t>(), self.data<scalar_t>(), k, mat_size,
res_batch_stride, res_row_stride, res_col_stride,
self_batch_stride, self_row_stride, self_col_stride, self.size(-1));
});
// Surface launch-configuration errors immediately.
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
// In-place tril of `self` (keep elements with col - row <= k). When the
// tensor is batch-contiguous the kernel can read and write `self` directly;
// otherwise run on a contiguous copy and copy the result back.
Tensor& tril_cuda_(Tensor &self, int64_t k) {
const bool can_update_in_place = checkTrilTriuBatchContiguous(self);
Tensor source = self;
Tensor destination = self;
if (!can_update_in_place) {
source = self.contiguous();
destination = at::empty_like(self);
}
tril_cuda_out(destination, source, k);
if (!can_update_in_place) {
self.copy_(destination);
}
return self;
}
// Out-of-place tril: writes the lower-triangular part of `self` into
// `result` (resized to match if necessary) and returns `result`.
Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
if (result.sizes() != self.sizes()) {
result.resize_as_(self);
}
// Empty tensors have nothing to mask.
if (self.numel() == 0) {
return result;
}
// The kernel assumes batch-contiguous input; fall back to a contiguous copy.
Tensor input = self;
if (!checkTrilTriuBatchContiguous(input)) {
input = input.contiguous();
}
return triu_tril_cuda_template<false>(result, input, k, "tril");
}
// In-place triu of `self` (keep elements with col - row >= k). When the
// tensor is batch-contiguous the kernel can read and write `self` directly;
// otherwise run on a contiguous copy and copy the result back.
Tensor& triu_cuda_(Tensor &self, int64_t k) {
const bool can_update_in_place = checkTrilTriuBatchContiguous(self);
Tensor source = self;
Tensor destination = self;
if (!can_update_in_place) {
source = self.contiguous();
destination = at::empty_like(self);
}
triu_cuda_out(destination, source, k);
if (!can_update_in_place) {
self.copy_(destination);
}
return self;
}
// Out-of-place triu: writes the upper-triangular part of `self` into
// `result` (resized to match if necessary) and returns `result`.
Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
if (result.sizes() != self.sizes()) {
result.resize_as_(self);
}
// Empty tensors have nothing to mask.
if (self.numel() == 0) {
return result;
}
// The kernel assumes batch-contiguous input; fall back to a contiguous copy.
Tensor input = self;
if (!checkTrilTriuBatchContiguous(input)) {
input = input.contiguous();
}
return triu_tril_cuda_template<true>(result, input, k, "triu");
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the triangular system op(A) X = b via MAGMA, overwriting `b` with
// the solution. `upper`/`transpose`/`unitriangular` map to the MAGMA
// uplo/trans/diag flags; batched (dim > 2) inputs use the batched entry point.
// Fix: the missing-MAGMA error message previously said "cholesky_solve"
// (copy-paste from another function); it now names triangular_solve.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up arrays of per-matrix device pointers for the batched MAGMA call.
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array, n,
b_array, n, batch_size, magma_queue);
}
#endif
}
// CUDA entry point for triangular_solve: clones both operands into
// column-major storage (MAGMA requirement), solves in place, and returns
// (solution, cloned A).
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
daa5556e7926cf7e6fb81a9ac38275988be9dcef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, MEMORY
// CORRUPTION
// srad kernel
// SRAD (Rodinia) kernel 2: computes the divergence of the diffusion-
// coefficient-weighted image derivatives (equ 58) and applies the image
// update (equ 61). One thread per element; matrices are d_Nr x d_Nc stored
// column-major. Fix: removed the unused `iN`/`jW` locals.
// NOTE(review): north/west coefficients read d_c[ei] (the current element),
// matching the Rodinia discretization — the "BUG" note above predates this
// review and has not been resolved here.
__global__ void srad2(fp d_lambda, int d_Nr, int d_Nc, long d_Ne,
// int16_t *d_iN,
// int16_t *d_iS, int16_t *d_jE, int16_t *d_jW,
fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW, fp *d_c, fp *d_I)
{
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // column, x position
int col; // row, y position
// variables
fp d_cN, d_cS, d_cW, d_cE;
fp d_D;
// figure out row/col location in new matrix (column-major decode of ei)
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
// South/east neighbour indices, clamped at the border (replicate edge).
int iS, jE;
if(row == d_Nr -1){
iS = d_Nr -1;
}else{
iS = row+1;
}
if(col == d_Nc -1){
jE = d_Nc - 1;
}else{
jE = col +1;
}
// diffusion coefficent
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[iS + d_Nr * col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * jE]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN * d_dN[ei] + d_cS * d_dS[ei] + d_cW * d_dW[ei] +
d_cE * d_dE[ei]; // divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] =
d_I[ei] +
0.25 * d_lambda *
d_D; // updates image (based on input time step and divergence)
}
}
| daa5556e7926cf7e6fb81a9ac38275988be9dcef.cu | // BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, MEMORY
// CORRUPTION
// srad kernel
// SRAD (Rodinia) kernel 2: computes the divergence of the diffusion-
// coefficient-weighted image derivatives (equ 58) and applies the image
// update (equ 61). One thread per element; matrices are d_Nr x d_Nc stored
// column-major. Fix: removed the unused `iN`/`jW` locals.
// NOTE(review): north/west coefficients read d_c[ei] (the current element),
// matching the Rodinia discretization — the "BUG" note above predates this
// review and has not been resolved here.
__global__ void srad2(fp d_lambda, int d_Nr, int d_Nc, long d_Ne,
// int16_t *d_iN,
// int16_t *d_iS, int16_t *d_jE, int16_t *d_jW,
fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW, fp *d_c, fp *d_I)
{
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // column, x position
int col; // row, y position
// variables
fp d_cN, d_cS, d_cW, d_cE;
fp d_D;
// figure out row/col location in new matrix (column-major decode of ei)
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
// South/east neighbour indices, clamped at the border (replicate edge).
int iS, jE;
if(row == d_Nr -1){
iS = d_Nr -1;
}else{
iS = row+1;
}
if(col == d_Nc -1){
jE = d_Nc - 1;
}else{
jE = col +1;
}
// diffusion coefficent
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[iS + d_Nr * col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * jE]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN * d_dN[ei] + d_cS * d_dS[ei] + d_cW * d_dW[ei] +
d_cE * d_dE[ei]; // divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] =
d_I[ei] +
0.25 * d_lambda *
d_D; // updates image (based on input time step and divergence)
}
}
|
5343437f3cb085b6662aa7095eca5f70d51ce1cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <iostream>
#include "util.h"
#include "hip/hip_fp16.h"
// Elementwise addition: c[i] = a[i] + b[i]. One thread per element; launch
// with at least `size` threads (excess threads fall through the bound check).
template<typename T>
__global__ void elt_add(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) + *(b+i);
}
}
// __half specialization: use the half-precision intrinsic instead of
// operator+, which is not available for __half on all architectures.
template<>
__global__ void elt_add(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = __hadd(*(a+i), *(b+i));
}
}
// Elementwise subtraction: c[i] = a[i] - b[i].
template<typename T>
__global__ void elt_sub(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) - *(b+i);
}
}
// __half specialization using the half-precision subtract intrinsic.
template<>
__global__ void elt_sub(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = __hsub(*(a+i), *(b+i));
}
}
// Elementwise multiplication: c[i] = a[i] * b[i].
template<typename T>
__global__ void elt_mul(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) * *(b+i);
}
}
// __half specialization using the half-precision multiply intrinsic.
template<>
__global__ void elt_mul(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) * *(b+i);
}
}
// Elementwise division: c[i] = a[i] / b[i]. No zero-divisor check;
// integer division by zero is undefined behaviour on the device.
template<typename T>
__global__ void elt_div(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) / *(b+i);
}
}
// __half specialization using the half-precision divide intrinsic.
template<>
__global__ void elt_div(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = __hdiv(*(a+i), *(b+i));
}
}
// REGSITER_BINARY_OP expands to a host-side dispatcher `<name>_op(a, b, c,
// size, dtype, thread_size)` that launches the kernel instantiation matching
// the runtime `dtype`. ("register" is misspelled in the macro name, but it is
// kept as-is because renaming would change the generated public symbols.)
// NOTE(review): the block count divides by elem_size(dtype), which suggests
// `size` is a byte count, yet the kernels compare the thread index against
// `size` as an element count — confirm which unit callers pass.
#define REGSITER_BINARY_OP(name) \
void name##_op(char *a,char *b,char *c,int size,Dtype dtype,int thread_size=32)\
{\
int block_size = ::ceil(size*1.0/(elem_size(dtype)*thread_size));\
switch(dtype){\
case Dtype::float16:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (__half*)a,(__half*)b,(__half*)c,size);\
break;\
case Dtype::float32:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (float*)a,(float*)b,(float*)c,size);\
break;\
case Dtype::float64:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (double*)a,(double*)b,(double*)c,size);\
break;\
case Dtype::int8:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (int8_t*)a,(int8_t*)b,(int8_t*)c,size);\
break;\
case Dtype::uint8:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (uint8_t*)a,(uint8_t*)b,(uint8_t*)c,size);\
break;\
case Dtype::int16:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (int16_t*)a,(int16_t*)b,(int16_t*)c,size);\
break;\
case Dtype::uint16:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (uint16_t*)a,(uint16_t*)b,(uint16_t*)c,size);\
break;\
case Dtype::int32:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (int32_t*)a,(int32_t*)b,(int32_t*)c,size);\
break;\
case Dtype::uint32:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (uint32_t*)a,(uint32_t*)b,(uint32_t*)c,size);\
break;\
case Dtype::int64:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (int64_t*)a,(int64_t*)b,(int64_t*)c,size);\
break;\
case Dtype::uint64:\
hipLaunchKernelGGL(( name), dim3(block_size),dim3(thread_size), 0, 0, (uint64_t*)a,(uint64_t*)b,(uint64_t*)c,size);\
break;\
default:\
std::cerr<<"use wrong type"<<std::endl;\
}\
}\
// Instantiate the four elementwise dispatchers.
REGSITER_BINARY_OP(elt_add)
REGSITER_BINARY_OP(elt_sub)
REGSITER_BINARY_OP(elt_mul)
REGSITER_BINARY_OP(elt_div)
#include <iostream>
#include "util.h"
#include "cuda_fp16.h"
// Elementwise addition: c[i] = a[i] + b[i]. One thread per element; launch
// with at least `size` threads (excess threads fall through the bound check).
template<typename T>
__global__ void elt_add(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) + *(b+i);
}
}
// __half specialization: use the half-precision intrinsic instead of
// operator+, which is not available for __half on all architectures.
template<>
__global__ void elt_add(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = __hadd(*(a+i), *(b+i));
}
}
// Elementwise subtraction: c[i] = a[i] - b[i].
template<typename T>
__global__ void elt_sub(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) - *(b+i);
}
}
// __half specialization using the half-precision subtract intrinsic.
template<>
__global__ void elt_sub(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = __hsub(*(a+i), *(b+i));
}
}
// Elementwise multiplication: c[i] = a[i] * b[i].
template<typename T>
__global__ void elt_mul(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) * *(b+i);
}
}
// __half specialization using the half-precision multiply intrinsic.
template<>
__global__ void elt_mul(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = __hmul(*(a+i), *(b+i));
}
}
// Elementwise division: c[i] = a[i] / b[i]. No zero-divisor check;
// integer division by zero is undefined behaviour on the device.
template<typename T>
__global__ void elt_div(T *a,T *b,T *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = *(a+i) / *(b+i);
}
}
// __half specialization using the half-precision divide intrinsic.
template<>
__global__ void elt_div(__half *a,__half *b,__half *c,int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<size){
*(c+i) = __hdiv(*(a+i), *(b+i));
}
}
// REGSITER_BINARY_OP expands to a host-side dispatcher `<name>_op(a, b, c,
// size, dtype, thread_size)` that launches the kernel instantiation matching
// the runtime `dtype`. ("register" is misspelled in the macro name, but it is
// kept as-is because renaming would change the generated public symbols.)
// NOTE(review): the block count divides by elem_size(dtype), which suggests
// `size` is a byte count, yet the kernels compare the thread index against
// `size` as an element count — confirm which unit callers pass.
#define REGSITER_BINARY_OP(name) \
void name##_op(char *a,char *b,char *c,int size,Dtype dtype,int thread_size=32)\
{\
int block_size = std::ceil(size*1.0/(elem_size(dtype)*thread_size));\
switch(dtype){\
case Dtype::float16:\
name<<<block_size,thread_size>>>((__half*)a,(__half*)b,(__half*)c,size);\
break;\
case Dtype::float32:\
name<<<block_size,thread_size>>>((float*)a,(float*)b,(float*)c,size);\
break;\
case Dtype::float64:\
name<<<block_size,thread_size>>>((double*)a,(double*)b,(double*)c,size);\
break;\
case Dtype::int8:\
name<<<block_size,thread_size>>>((int8_t*)a,(int8_t*)b,(int8_t*)c,size);\
break;\
case Dtype::uint8:\
name<<<block_size,thread_size>>>((uint8_t*)a,(uint8_t*)b,(uint8_t*)c,size);\
break;\
case Dtype::int16:\
name<<<block_size,thread_size>>>((int16_t*)a,(int16_t*)b,(int16_t*)c,size);\
break;\
case Dtype::uint16:\
name<<<block_size,thread_size>>>((uint16_t*)a,(uint16_t*)b,(uint16_t*)c,size);\
break;\
case Dtype::int32:\
name<<<block_size,thread_size>>>((int32_t*)a,(int32_t*)b,(int32_t*)c,size);\
break;\
case Dtype::uint32:\
name<<<block_size,thread_size>>>((uint32_t*)a,(uint32_t*)b,(uint32_t*)c,size);\
break;\
case Dtype::int64:\
name<<<block_size,thread_size>>>((int64_t*)a,(int64_t*)b,(int64_t*)c,size);\
break;\
case Dtype::uint64:\
name<<<block_size,thread_size>>>((uint64_t*)a,(uint64_t*)b,(uint64_t*)c,size);\
break;\
default:\
std::cerr<<"use wrong type"<<std::endl;\
}\
}\
// Instantiate the four elementwise dispatchers.
REGSITER_BINARY_OP(elt_add)
REGSITER_BINARY_OP(elt_sub)
REGSITER_BINARY_OP(elt_mul)
REGSITER_BINARY_OP(elt_div)
f1dc20358d5de8976b800a15f14c61ad48340746.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <png.h>
#include <string.h>
#include <stdlib.h>
#include <omp.h>
#define MAX_ITERATION 100
#define CEIL(a, b) (((a) + (b) - 1)/(b))
int writeImage(char* filename, int width, int height, float *buffer, char* title);
static void setRGB(png_byte *ptr, float val);
float *in_CPU_parallel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer);
float *in_CPU(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer);
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration, float *buffer);
/* ./exec <min_real> <min_imag> <max_real> <max_imag> <W> <H> <CPU/GPU> <Treadhs> <Saida> */
/* Entry point: parses the view window, image size, accelerator ("cpu"/"gpu"),
 * thread count and output filename, renders the Mandelbrot set and writes a
 * PNG. Fixes: `accelerator[0]` is compared against the char 'c' (the old code
 * compared a char with a string literal, which is ill-formed); the device
 * buffer is declared before use; the kernel arguments follow the declared
 * order (max_real before max_imag); the malloc-failure path returns an int
 * instead of NULL; and hipFree is called on the device pointer rather than
 * the already-freed host buffer. */
int main(int argc, char *argv[]){
if(argc != 10){
printf("Please specify output file\n");
return 1;
}
/* Set variables */
float min_real = atof(argv[1]);
float min_imag = atof(argv[2]);
float max_real = atof(argv[3]);
float max_imag = atof(argv[4]);
int width = atoi(argv[5]);
int height = atoi(argv[6]);
char accelerator[50];
strcpy(accelerator, argv[7]);
int threads = atoi(argv[8]);
char file_name[50];
strcpy(file_name, argv[9]);
int iteration = MAX_ITERATION;
int flag_accelerator = 0;
float *d_buffer = NULL; /* device-side pixel buffer (GPU path only) */
float *buffer = (float*)malloc(width * height * sizeof(float));
if (buffer == NULL) {
printf("Could not create image buffer\n");
return 1;
}
/* Checks if in CPU or GPU */
if(accelerator[0] == 'c'){
printf("Process Image in CPU\n");
if(threads > 1){
omp_set_num_threads(threads);
buffer = in_CPU_parallel(width, height, min_real, min_imag, max_real, max_imag, iteration, buffer);
}else
buffer = in_CPU(width, height, min_real, min_imag, max_real, max_imag, iteration, buffer);
}else{
printf("Process Image in GPU\n");
flag_accelerator = 1;
/* Allocate the pixel buffer on the GPU */
hipMalloc(&d_buffer, width*height*sizeof(float));
/* Choose the launch configuration: enough blocks to cover every pixel */
int threads_per_block = threads;
int num_blocks = CEIL((width*height), threads_per_block);
hipLaunchKernelGGL(( kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, width, height, min_real, min_imag, max_real, max_imag, iteration, d_buffer);
hipDeviceSynchronize();
/* Copy the rendered image back to the host */
hipMemcpy(buffer, d_buffer, width*height*sizeof(float), hipMemcpyDeviceToHost);
}
printf("Saving PNG\n");
int result = writeImage(file_name, width, height, buffer, "MANDELBROT");
free(buffer);
if(flag_accelerator)
hipFree(d_buffer);
return result;
}
/* OpenMP-parallel Mandelbrot escape-time renderer. Fills `buffer` (row-major,
 * width*height floats) with the iteration count of each pixel and returns it.
 * Fixes: the old `private(...)` clause named undeclared variables (modZ, mu),
 * which fails to compile under -fopenmp, and a `critical` section needlessly
 * serialized writes to what are per-pixel locals. All per-pixel state is now
 * declared inside the (perfectly nested, collapsible) loop body, making it
 * automatically private. */
float *in_CPU_parallel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer){
float delt_x = (max_real - min_real) / width;
float delt_y = (max_imag - min_imag) / height;
#pragma omp parallel for schedule(dynamic,10) collapse(2)
for (int y_position = 0 ; y_position < height ; y_position++){
for (int x_position = 0 ; x_position < width ; x_position++){
/* Complex coordinate of this pixel (imaginary axis grows upward). */
float y_pointer = max_imag - delt_y * y_position;
float x_pointer = min_real + delt_x * x_position;
float z_x = x_pointer;
float z_y = y_pointer;
float z_x2 = z_x * z_x;
float z_y2 = z_y * z_y;
int iteration;
/* Iterate z = z^2 + c until escape (|z|^2 >= 4) or the budget runs out. */
for(iteration = 0; iteration < iteration_max && ((z_x2+z_y2) < 4); iteration++){
z_y = 2 * z_x * z_y + y_pointer;
z_x = z_x2 - z_y2 + x_pointer;
z_x2 = z_x * z_x;
z_y2 = z_y * z_y;
}
buffer[y_position * width + x_position] = iteration;
}
}
return buffer;
};
/* Sequential Mandelbrot escape-time renderer. Fills `buffer` (row-major,
 * width*height floats) with the iteration count of each pixel and returns
 * the same pointer. The imaginary axis grows upward (row 0 = max_imag). */
float *in_CPU(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer){
const float step_x = (max_real - min_real) / width;
const float step_y = (max_imag - min_imag) / height;
for (int py = 0; py < height; py++){
for (int px = 0; px < width; px++){
/* Complex coordinate c for this pixel. */
const float c_im = max_imag - step_y * py;
const float c_re = min_real + step_x * px;
float re = c_re;
float im = c_im;
float re2 = re * re;
float im2 = im * im;
int it = 0;
/* Iterate z = z^2 + c until escape (|z|^2 >= 4) or the budget runs out. */
while (it < iteration_max && (re2 + im2) < 4) {
im = 2 * re * im + c_im;
re = re2 - im2 + c_re;
re2 = re * re;
im2 = im * im;
it++;
}
buffer[py * width + px] = it;
}
}
return buffer;
};
/* GPU Mandelbrot escape-time kernel: each thread renders one pixel of the
 * width x height image (row-major) into `buffer`. Launch with at least
 * width*height threads in a 1D grid.
 * Fixes: the escape loop was missing its increment clause (a syntax error),
 * the unused `xtemp` local was removed, and a bounds guard was added so the
 * rounded-up grid does not write past the end of `buffer`. */
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer){
int index_vector = (blockIdx.x * blockDim.x) + threadIdx.x;
/* Guard the ragged tail of the last block. */
if (index_vector >= width * height)
return;
int i = index_vector / width;
int j = index_vector % width;
float del_x = (max_real - min_real)/width;
float del_y= (max_imag - min_imag)/height;
int iteration = 0;
float x_point = min_real + del_x * j;
float y_point = max_imag - del_y * i;
float z_y = y_point;
float z_x = x_point;
float z_x2 = z_x * z_x;
float z_y2 = z_y * z_y;
/* Iterate z = z^2 + c until escape or the iteration budget is exhausted.
 * NOTE(review): the CPU renderers use a strict `< 4` escape test; this
 * kernel keeps the `<= 4` it was written with -- confirm which is intended. */
for(iteration = 0; iteration < iteration_max && ((z_x2 + z_y2) <= 4); iteration++){
z_y = (2.0f * z_x * z_y) + y_point;
z_x = z_x2 - z_y2 + x_point;
z_x2 = z_x * z_x;
z_y2 = z_y * z_y;
}
buffer[index_vector] = iteration;
};
// Maps an iteration count `val` (0..MAX_ITERATION) to an RGB color.
// Note: in `(int)(val/MAX_ITERATION) * 255` the cast binds to the quotient
// only, so the product is 0 for val < MAX_ITERATION and 255 at the maximum;
// v is therefore 0 exactly when val == MAX_ITERATION (point in the set,
// drawn black below) and 255 otherwise (colored by escape-time band).
static void setRGB(png_byte *ptr, float val){
int v = 255 - (int)(val/MAX_ITERATION) * 255;
if(v == 0){
// In the set: black (v is 0 for all three channels here).
ptr[0] = v;
ptr[1] = v;
ptr[2] = v;
}else{
// Escaped: pick a fixed color per iteration band.
if(val < 10){
ptr[0] = 192;
ptr[1] = 217;
ptr[2] = 217;
}else if(val < 15){
ptr[0] = 95;
ptr[1] = 159;
ptr[2] = 159;
}else if(val < 25){
ptr[0] = 0;
ptr[1] = 255;
ptr[2] = 255;
}else if(val < 50){
ptr[0] = 255;
ptr[1] = 0;
ptr[2] = 255;
}else if(val < 75){
ptr[0] = 234;
ptr[1] = 173;
ptr[2] = 234;
}else{
ptr[0] = 79;
ptr[1] = 47;
ptr[2] = 79;
}
}
};
// Renders `buffer` (row-major, width*height iteration counts) to an 8-bit RGB
// PNG at `filename` using libpng. Returns 0 on success, 1 on any failure.
// NOTE(review): the `title` parameter is accepted but never written into the
// PNG metadata, and the row `malloc` result is not NULL-checked.
int writeImage(char* filename, int width, int height, float *buffer, char* title){
int code = 0;
FILE *fp = NULL;
png_structp png_ptr = NULL;
png_infop info_ptr = NULL;
png_bytep row = NULL;
// Open file for writing (binary mode)
fp = fopen(filename, "wb");
if (fp == NULL) {
printf( "Could not open file %s for writing\n", filename);
code = 1;
goto finalise;
}
// Initialize write structure
png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (png_ptr == NULL) {
printf( "Could not allocate write struct\n");
code = 1;
goto finalise;
}
// Initialize info structure
info_ptr = png_create_info_struct(png_ptr);
if (info_ptr == NULL) {
printf( "Could not allocate info struct\n");
code = 1;
goto finalise;
}
// Setup Exception handling (libpng longjmps here on internal errors)
if (setjmp(png_jmpbuf(png_ptr))) {
printf( "Error during png creation\n");
code = 1;
goto finalise;
}
png_init_io(png_ptr, fp);
// Write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, width, height,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
png_write_info(png_ptr, info_ptr);
// Allocate memory for one row (3 bytes per pixel - RGB)
row = (png_bytep) malloc(3 * width * sizeof(png_byte));
// Write image data, one colorized row at a time
int x, y;
for (y=0 ; y<height ; y++) {
for (x=0 ; x<width ; x++) {
setRGB(&(row[x*3]), buffer[y*width + x]);
}
png_write_row(png_ptr, row);
}
// End write
png_write_end(png_ptr, NULL);
finalise:
// Single cleanup path for every exit above.
if (fp != NULL) fclose(fp);
if (info_ptr != NULL) png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
if (png_ptr != NULL) png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
if (row != NULL) free(row);
return code;
};
| f1dc20358d5de8976b800a15f14c61ad48340746.cu | #include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <png.h>
#include <string.h>
#include <stdlib.h>
#include <omp.h>
#define MAX_ITERATION 100
#define CEIL(a, b) (((a) + (b) - 1)/(b))
int writeImage(char* filename, int width, int height, float *buffer, char* title);
static void setRGB(png_byte *ptr, float val);
float *in_CPU_parallel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer);
float *in_CPU(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer);
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration, float *buffer);
/* ./exec <min_real> <min_imag> <max_real> <max_imag> <W> <H> <CPU/GPU> <Treadhs> <Saida> */
/* Entry point: parses the view window, image size, accelerator ("cpu"/"gpu"),
 * thread count and output filename, renders the Mandelbrot set and writes a
 * PNG. Fixes: `accelerator[0]` is compared against the char 'c' (the old code
 * compared a char with a string literal, which is ill-formed); the device
 * buffer is declared before use; the kernel arguments follow the declared
 * order (max_real before max_imag); the malloc-failure path returns an int
 * instead of NULL; and cudaFree is called on the device pointer rather than
 * the already-freed host buffer. */
int main(int argc, char *argv[]){
if(argc != 10){
printf("Please specify output file\n");
return 1;
}
/* Set variables */
float min_real = atof(argv[1]);
float min_imag = atof(argv[2]);
float max_real = atof(argv[3]);
float max_imag = atof(argv[4]);
int width = atoi(argv[5]);
int height = atoi(argv[6]);
char accelerator[50];
strcpy(accelerator, argv[7]);
int threads = atoi(argv[8]);
char file_name[50];
strcpy(file_name, argv[9]);
int iteration = MAX_ITERATION;
int flag_accelerator = 0;
float *d_buffer = NULL; /* device-side pixel buffer (GPU path only) */
float *buffer = (float*)malloc(width * height * sizeof(float));
if (buffer == NULL) {
printf("Could not create image buffer\n");
return 1;
}
/* Checks if in CPU or GPU */
if(accelerator[0] == 'c'){
printf("Process Image in CPU\n");
if(threads > 1){
omp_set_num_threads(threads);
buffer = in_CPU_parallel(width, height, min_real, min_imag, max_real, max_imag, iteration, buffer);
}else
buffer = in_CPU(width, height, min_real, min_imag, max_real, max_imag, iteration, buffer);
}else{
printf("Process Image in GPU\n");
flag_accelerator = 1;
/* Allocate the pixel buffer on the GPU */
cudaMalloc(&d_buffer, width*height*sizeof(float));
/* Choose the launch configuration: enough blocks to cover every pixel */
int threads_per_block = threads;
int num_blocks = CEIL((width*height), threads_per_block);
kernel<<<num_blocks, threads_per_block>>>(width, height, min_real, min_imag, max_real, max_imag, iteration, d_buffer);
cudaDeviceSynchronize();
/* Copy the rendered image back to the host */
cudaMemcpy(buffer, d_buffer, width*height*sizeof(float), cudaMemcpyDeviceToHost);
}
printf("Saving PNG\n");
int result = writeImage(file_name, width, height, buffer, "MANDELBROT");
free(buffer);
if(flag_accelerator)
cudaFree(d_buffer);
return result;
}
/* OpenMP-parallel Mandelbrot escape-time renderer. Fills `buffer` (row-major,
 * width*height floats) with the iteration count of each pixel and returns it.
 * Fixes: the old `private(...)` clause named undeclared variables (modZ, mu),
 * which fails to compile under -fopenmp, and a `critical` section needlessly
 * serialized writes to what are per-pixel locals. All per-pixel state is now
 * declared inside the (perfectly nested, collapsible) loop body, making it
 * automatically private. */
float *in_CPU_parallel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer){
float delt_x = (max_real - min_real) / width;
float delt_y = (max_imag - min_imag) / height;
#pragma omp parallel for schedule(dynamic,10) collapse(2)
for (int y_position = 0 ; y_position < height ; y_position++){
for (int x_position = 0 ; x_position < width ; x_position++){
/* Complex coordinate of this pixel (imaginary axis grows upward). */
float y_pointer = max_imag - delt_y * y_position;
float x_pointer = min_real + delt_x * x_position;
float z_x = x_pointer;
float z_y = y_pointer;
float z_x2 = z_x * z_x;
float z_y2 = z_y * z_y;
int iteration;
/* Iterate z = z^2 + c until escape (|z|^2 >= 4) or the budget runs out. */
for(iteration = 0; iteration < iteration_max && ((z_x2+z_y2) < 4); iteration++){
z_y = 2 * z_x * z_y + y_pointer;
z_x = z_x2 - z_y2 + x_pointer;
z_x2 = z_x * z_x;
z_y2 = z_y * z_y;
}
buffer[y_position * width + x_position] = iteration;
}
}
return buffer;
};
/* Sequential Mandelbrot escape-time renderer. Fills `buffer` (row-major,
 * width*height floats) with the iteration count of each pixel and returns
 * the same pointer. The imaginary axis grows upward (row 0 = max_imag). */
float *in_CPU(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer){
const float step_x = (max_real - min_real) / width;
const float step_y = (max_imag - min_imag) / height;
for (int py = 0; py < height; py++){
for (int px = 0; px < width; px++){
/* Complex coordinate c for this pixel. */
const float c_im = max_imag - step_y * py;
const float c_re = min_real + step_x * px;
float re = c_re;
float im = c_im;
float re2 = re * re;
float im2 = im * im;
int it = 0;
/* Iterate z = z^2 + c until escape (|z|^2 >= 4) or the budget runs out. */
while (it < iteration_max && (re2 + im2) < 4) {
im = 2 * re * im + c_im;
re = re2 - im2 + c_re;
re2 = re * re;
im2 = im * im;
it++;
}
buffer[py * width + px] = it;
}
}
return buffer;
};
/* GPU Mandelbrot escape-time kernel: each thread renders one pixel of the
 * width x height image (row-major) into `buffer`. Launch with at least
 * width*height threads in a 1D grid.
 * Fixes: the escape loop was missing its increment clause (a syntax error),
 * the unused `xtemp` local was removed, and a bounds guard was added so the
 * rounded-up grid does not write past the end of `buffer`. */
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer){
int index_vector = (blockIdx.x * blockDim.x) + threadIdx.x;
/* Guard the ragged tail of the last block. */
if (index_vector >= width * height)
return;
int i = index_vector / width;
int j = index_vector % width;
float del_x = (max_real - min_real)/width;
float del_y= (max_imag - min_imag)/height;
int iteration = 0;
float x_point = min_real + del_x * j;
float y_point = max_imag - del_y * i;
float z_y = y_point;
float z_x = x_point;
float z_x2 = z_x * z_x;
float z_y2 = z_y * z_y;
/* Iterate z = z^2 + c until escape or the iteration budget is exhausted.
 * NOTE(review): the CPU renderers use a strict `< 4` escape test; this
 * kernel keeps the `<= 4` it was written with -- confirm which is intended. */
for(iteration = 0; iteration < iteration_max && ((z_x2 + z_y2) <= 4); iteration++){
z_y = (2.0f * z_x * z_y) + y_point;
z_x = z_x2 - z_y2 + x_point;
z_x2 = z_x * z_x;
z_y2 = z_y * z_y;
}
buffer[index_vector] = iteration;
};
// Maps an iteration count `val` (0..MAX_ITERATION) to an RGB color.
// Note: in `(int)(val/MAX_ITERATION) * 255` the cast binds to the quotient
// only, so the product is 0 for val < MAX_ITERATION and 255 at the maximum;
// v is therefore 0 exactly when val == MAX_ITERATION (point in the set,
// drawn black below) and 255 otherwise (colored by escape-time band).
static void setRGB(png_byte *ptr, float val){
int v = 255 - (int)(val/MAX_ITERATION) * 255;
if(v == 0){
// In the set: black (v is 0 for all three channels here).
ptr[0] = v;
ptr[1] = v;
ptr[2] = v;
}else{
// Escaped: pick a fixed color per iteration band.
if(val < 10){
ptr[0] = 192;
ptr[1] = 217;
ptr[2] = 217;
}else if(val < 15){
ptr[0] = 95;
ptr[1] = 159;
ptr[2] = 159;
}else if(val < 25){
ptr[0] = 0;
ptr[1] = 255;
ptr[2] = 255;
}else if(val < 50){
ptr[0] = 255;
ptr[1] = 0;
ptr[2] = 255;
}else if(val < 75){
ptr[0] = 234;
ptr[1] = 173;
ptr[2] = 234;
}else{
ptr[0] = 79;
ptr[1] = 47;
ptr[2] = 79;
}
}
};
// Renders `buffer` (row-major, width*height iteration counts) to an 8-bit RGB
// PNG at `filename` using libpng. Returns 0 on success, 1 on any failure.
// NOTE(review): the `title` parameter is accepted but never written into the
// PNG metadata, and the row `malloc` result is not NULL-checked.
int writeImage(char* filename, int width, int height, float *buffer, char* title){
int code = 0;
FILE *fp = NULL;
png_structp png_ptr = NULL;
png_infop info_ptr = NULL;
png_bytep row = NULL;
// Open file for writing (binary mode)
fp = fopen(filename, "wb");
if (fp == NULL) {
printf( "Could not open file %s for writing\n", filename);
code = 1;
goto finalise;
}
// Initialize write structure
png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (png_ptr == NULL) {
printf( "Could not allocate write struct\n");
code = 1;
goto finalise;
}
// Initialize info structure
info_ptr = png_create_info_struct(png_ptr);
if (info_ptr == NULL) {
printf( "Could not allocate info struct\n");
code = 1;
goto finalise;
}
// Setup Exception handling (libpng longjmps here on internal errors)
if (setjmp(png_jmpbuf(png_ptr))) {
printf( "Error during png creation\n");
code = 1;
goto finalise;
}
png_init_io(png_ptr, fp);
// Write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, width, height,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
png_write_info(png_ptr, info_ptr);
// Allocate memory for one row (3 bytes per pixel - RGB)
row = (png_bytep) malloc(3 * width * sizeof(png_byte));
// Write image data, one colorized row at a time
int x, y;
for (y=0 ; y<height ; y++) {
for (x=0 ; x<width ; x++) {
setRGB(&(row[x*3]), buffer[y*width + x]);
}
png_write_row(png_ptr, row);
}
// End write
png_write_end(png_ptr, NULL);
finalise:
// Single cleanup path for every exit above.
if (fp != NULL) fclose(fp);
if (info_ptr != NULL) png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
if (png_ptr != NULL) png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
if (row != NULL) free(row);
return code;
};
|
5e544999bf159c15ddde4d285600856d388b0235.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
const int maxThreadsPerBlock = 256; //to be on safe side.
// Greatest common divisor, iterative Euclidean algorithm.
// Contract matches the original recursive version: gcd(0, b) == b.
size_t gcd (size_t a, size_t b) {
  while (a != 0) {
    size_t remainder = b % a;
    b = a;
    a = remainder;
  }
  return b;
}
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  // One thread per pixel over a flat 1D launch; the host wrapper rounds
  // the grid up, so guard the tail threads.
  const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
  if (pixel >= numRows * numCols)
    return;
  // NTSC luma weighting: I = .299 R + .587 G + .114 B.
  // uchar4 mapping: .x -> R, .y -> G, .z -> B, .w -> A (alpha ignored).
  const uchar4 rgba = rgbaImage[pixel];
  const float luma = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
  // Implicit float -> unsigned char conversion (truncation), exactly as
  // the original store performed.
  greyImage[pixel] = luma;
}
// Host wrapper: launch rgba_to_greyscale over the whole image with a 1D
// grid of 1D blocks, then block until completion and surface any error.
// h_rgbaImage is unused here (kept for the course-provided interface);
// d_rgbaImage / d_greyImage are device pointers.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  size_t pixelCount = numRows*numCols;
  if (pixelCount == 0) return;  // a zero-block launch would be an invalid config
  // Ceiling division. The original "pixelCount/maxThreadsPerBlock + 1"
  // launched one extra, entirely empty block whenever pixelCount was an
  // exact multiple of the block size.
  size_t blockCount = (pixelCount + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
  const dim3 gridSize(blockCount,1,1);
  const dim3 blockSize(maxThreadsPerBlock,1,1);
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 5e544999bf159c15ddde4d285600856d388b0235.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
const int maxThreadsPerBlock = 256; //to be on safe side.
// Greatest common divisor, iterative Euclidean algorithm.
// Contract matches the original recursive version: gcd(0, b) == b.
size_t gcd (size_t a, size_t b) {
  while (a != 0) {
    size_t remainder = b % a;
    b = a;
    a = remainder;
  }
  return b;
}
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  // One thread per pixel over a flat 1D launch; the host wrapper rounds
  // the grid up, so guard the tail threads.
  const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
  if (pixel >= numRows * numCols)
    return;
  // NTSC luma weighting: I = .299 R + .587 G + .114 B.
  // uchar4 mapping: .x -> R, .y -> G, .z -> B, .w -> A (alpha ignored).
  const uchar4 rgba = rgbaImage[pixel];
  const float luma = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
  // Implicit float -> unsigned char conversion (truncation), exactly as
  // the original store performed.
  greyImage[pixel] = luma;
}
// Host wrapper: launch rgba_to_greyscale over the whole image with a 1D
// grid of 1D blocks, then block until completion and surface any error.
// h_rgbaImage is unused here (kept for the course-provided interface);
// d_rgbaImage / d_greyImage are device pointers.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  size_t pixelCount = numRows*numCols;
  if (pixelCount == 0) return;  // a zero-block launch would be an invalid config
  // Ceiling division. The original "pixelCount/maxThreadsPerBlock + 1"
  // launched one extra, entirely empty block whenever pixelCount was an
  // exact multiple of the block size.
  size_t blockCount = (pixelCount + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
  const dim3 gridSize(blockCount,1,1);
  const dim3 blockSize(maxThreadsPerBlock,1,1);
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
ce21bb4bf4a0d4b583bf2f0010497f19f077f7f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#define REARRANGED_DOMAIN
// Reflective (solid-wall) boundary condition: for each of the Nids boundary
// segments, copy the owning volume's edge values into the boundary arrays,
// with the momentum/velocity components transformed through the edge-normal
// frame (rotate into (n1,n2), flip, rotate back).
//
// Launch: flat 1D thread index over a possibly-2D grid/block; one thread
// per segment, tail threads (k >= Nids) return immediately.
//
// Layouts (selected at compile time by REARRANGED_DOMAIN):
//   default:   per-volume contiguous — 3 edge values per volume,
//              normals as 6 doubles (2 per edge) per volume;
//   rearranged: struct-of-arrays with stride N (number of volumes).
__global__ void evaluate_segment_reflective(
    int N,                              // number of volumes (stride for the rearranged layout)
    int Nids,                           // number of boundary segments to process
    long * ids,                         // boundary-array index for each segment
    long * vol_ids,                     // owning volume index for each segment
    long * edge_ids,                    // local edge index (0..2) within the volume
    double * normals,                   // per-edge outward normals (n1, n2 pairs)
    double * stage_edge_values,
    double * bed_edge_values,
    double * height_edge_values,
    double * xmom_edge_values,
    double * ymom_edge_values,
    double * xvel_edge_values,
    double * yvel_edge_values,
    double * stage_boundary_values,
    double * bed_boundary_values,
    double * height_boundary_values,
    double * xmom_boundary_values,
    double * ymom_boundary_values,
    double * xvel_boundary_values,
    double * yvel_boundary_values)
{
    // Flatten the (possibly 2D) grid/block into a single segment index.
    const int k =
        threadIdx.x+threadIdx.y*blockDim.x+
        (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    if (k >= Nids)
        return;

    long id = ids[k],
        id_vol = vol_ids[k], // Note here should be k, since we pass
        id_edge= edge_ids[k]; // the vol_ids and edge_ids from CPU

    // Gather the edge normal and the momentum components for this edge.
    #ifndef REARRANGED_DOMAIN
    double n1 = normals[id_vol*6 + id_edge*2],
        n2 = normals[id_vol*6 + id_edge*2 + 1];
    double q1 = xmom_edge_values[id_vol*3 + id_edge],
        q2 = ymom_edge_values[id_vol*3 + id_edge];
    #else
    double n1 = normals[id_vol + id_edge*2*N],
        n2 = normals[id_vol + (id_edge*2+1)*N];
    double q1 = xmom_edge_values[id_vol + id_edge*N],
        q2 = ymom_edge_values[id_vol + id_edge*N];
    #endif

    // Momentum in the edge-normal frame, with signs arranged so the wall
    // reflects the flow (normal component reversed).
    double r1 = -q1*n1 - q2*n2,
        r2 = -q1*n2 + q2*n1;

    // Scalar quantities are mirrored unchanged; reuse q1/q2 for velocity.
    #ifndef REARRANGED_DOMAIN
    stage_boundary_values[id] = stage_edge_values[id_vol*3 + id_edge];
    bed_boundary_values[id] = bed_edge_values[id_vol*3 + id_edge];
    height_boundary_values[id] = height_edge_values[id_vol*3 + id_edge];
    q1 = xvel_edge_values[id_vol*3 + id_edge];
    q2 = yvel_edge_values[id_vol*3 + id_edge];
    #else
    stage_boundary_values[id] = stage_edge_values[id_vol + id_edge*N];
    bed_boundary_values[id] = bed_edge_values[id_vol + id_edge*N];
    height_boundary_values[id] = height_edge_values[id_vol + id_edge*N];
    q1 = xvel_edge_values[id_vol + id_edge*N];
    q2 = yvel_edge_values[id_vol + id_edge*N];
    #endif

    // Rotate the reflected momentum back to x/y components.
    xmom_boundary_values[id] = n1*r1 - n2*r2;
    ymom_boundary_values[id] = n2*r1 + n1*r2;

    // NOTE(review): velocity uses a different sign pattern than momentum
    // here (r1 not negated) — presumed intentional; confirm against the
    // CPU reference implementation.
    r1 = q1*n1 + q2*n2;
    r2 = q1*n2 - q2*n1;

    xvel_boundary_values[id] = n1*r1 - n2*r2;
    yvel_boundary_values[id] = n2*r1 + n1*r2;
}
// Dirichlet boundary condition, step 1: copy each boundary segment's
// interior edge value into the boundary-values array.
//
// Launch: flat 1D thread index over a possibly-2D grid/block; one thread
// per segment, tail threads (k >= N) return immediately.
__global__ void evaluate_segment_dirichlet_1(
    int N,                       // number of segments
    long * ids,                  // boundary-array index for each segment
    long * vol_ids,              // owning volume index for each segment
    long * edge_ids,             // local edge index (0..2) within the volume
    double * boundary_values,    // out: values at the boundary
    double *edge_values)         // in:  per-volume edge values (3 per volume)
{
    const int k =
        threadIdx.x+threadIdx.y*blockDim.x+
        (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    if (k >= N)
        return;
    // Use long for the gathered indices: the index arrays are long, and the
    // original narrowed them into int locals (corrupting ids above INT_MAX).
    // This also matches evaluate_segment_reflective.
    long id      = ids[k],
         id_vol  = vol_ids[k],
         id_edge = edge_ids[k];
    // Each volume stores its 3 edge values contiguously.
    boundary_values[id] = edge_values[id_vol*3 + id_edge];
}
// Dirichlet boundary condition, step 2: write the constant boundary
// quantity q_bdry into every listed boundary location.
//
// Launch: flat 1D thread index over a possibly-2D grid/block; one thread
// per segment, tail threads (k >= N) return immediately.
__global__ void evaluate_segment_dirichlet_2(
    int N,                       // number of segments
    double q_bdry,               // constant value to impose on the boundary
    long * ids,                  // boundary-array index for each segment
    double * boundary_values)    // out: values at the boundary
{
    const int k =
        threadIdx.x+threadIdx.y*blockDim.x+
        (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    // Bounds check BEFORE touching ids[]: the original loaded ids[k] first,
    // which is an out-of-bounds read for tail threads with k >= N.
    if (k >= N)
        return;
    long id = ids[k];  // long, matching the element type of ids
    boundary_values[id] = q_bdry;
}
| ce21bb4bf4a0d4b583bf2f0010497f19f077f7f8.cu | //#define REARRANGED_DOMAIN
// Reflective (solid-wall) boundary condition: for each of the Nids boundary
// segments, copy the owning volume's edge values into the boundary arrays,
// with the momentum/velocity components transformed through the edge-normal
// frame (rotate into (n1,n2), flip, rotate back).
//
// Launch: flat 1D thread index over a possibly-2D grid/block; one thread
// per segment, tail threads (k >= Nids) return immediately.
//
// Layouts (selected at compile time by REARRANGED_DOMAIN):
//   default:   per-volume contiguous — 3 edge values per volume,
//              normals as 6 doubles (2 per edge) per volume;
//   rearranged: struct-of-arrays with stride N (number of volumes).
__global__ void evaluate_segment_reflective(
    int N,                              // number of volumes (stride for the rearranged layout)
    int Nids,                           // number of boundary segments to process
    long * ids,                         // boundary-array index for each segment
    long * vol_ids,                     // owning volume index for each segment
    long * edge_ids,                    // local edge index (0..2) within the volume
    double * normals,                   // per-edge outward normals (n1, n2 pairs)
    double * stage_edge_values,
    double * bed_edge_values,
    double * height_edge_values,
    double * xmom_edge_values,
    double * ymom_edge_values,
    double * xvel_edge_values,
    double * yvel_edge_values,
    double * stage_boundary_values,
    double * bed_boundary_values,
    double * height_boundary_values,
    double * xmom_boundary_values,
    double * ymom_boundary_values,
    double * xvel_boundary_values,
    double * yvel_boundary_values)
{
    // Flatten the (possibly 2D) grid/block into a single segment index.
    const int k =
        threadIdx.x+threadIdx.y*blockDim.x+
        (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    if (k >= Nids)
        return;

    long id = ids[k],
        id_vol = vol_ids[k], // Note here should be k, since we pass
        id_edge= edge_ids[k]; // the vol_ids and edge_ids from CPU

    // Gather the edge normal and the momentum components for this edge.
    #ifndef REARRANGED_DOMAIN
    double n1 = normals[id_vol*6 + id_edge*2],
        n2 = normals[id_vol*6 + id_edge*2 + 1];
    double q1 = xmom_edge_values[id_vol*3 + id_edge],
        q2 = ymom_edge_values[id_vol*3 + id_edge];
    #else
    double n1 = normals[id_vol + id_edge*2*N],
        n2 = normals[id_vol + (id_edge*2+1)*N];
    double q1 = xmom_edge_values[id_vol + id_edge*N],
        q2 = ymom_edge_values[id_vol + id_edge*N];
    #endif

    // Momentum in the edge-normal frame, with signs arranged so the wall
    // reflects the flow (normal component reversed).
    double r1 = -q1*n1 - q2*n2,
        r2 = -q1*n2 + q2*n1;

    // Scalar quantities are mirrored unchanged; reuse q1/q2 for velocity.
    #ifndef REARRANGED_DOMAIN
    stage_boundary_values[id] = stage_edge_values[id_vol*3 + id_edge];
    bed_boundary_values[id] = bed_edge_values[id_vol*3 + id_edge];
    height_boundary_values[id] = height_edge_values[id_vol*3 + id_edge];
    q1 = xvel_edge_values[id_vol*3 + id_edge];
    q2 = yvel_edge_values[id_vol*3 + id_edge];
    #else
    stage_boundary_values[id] = stage_edge_values[id_vol + id_edge*N];
    bed_boundary_values[id] = bed_edge_values[id_vol + id_edge*N];
    height_boundary_values[id] = height_edge_values[id_vol + id_edge*N];
    q1 = xvel_edge_values[id_vol + id_edge*N];
    q2 = yvel_edge_values[id_vol + id_edge*N];
    #endif

    // Rotate the reflected momentum back to x/y components.
    xmom_boundary_values[id] = n1*r1 - n2*r2;
    ymom_boundary_values[id] = n2*r1 + n1*r2;

    // NOTE(review): velocity uses a different sign pattern than momentum
    // here (r1 not negated) — presumed intentional; confirm against the
    // CPU reference implementation.
    r1 = q1*n1 + q2*n2;
    r2 = q1*n2 - q2*n1;

    xvel_boundary_values[id] = n1*r1 - n2*r2;
    yvel_boundary_values[id] = n2*r1 + n1*r2;
}
// Dirichlet boundary condition, step 1: copy each boundary segment's
// interior edge value into the boundary-values array.
//
// Launch: flat 1D thread index over a possibly-2D grid/block; one thread
// per segment, tail threads (k >= N) return immediately.
__global__ void evaluate_segment_dirichlet_1(
    int N,                       // number of segments
    long * ids,                  // boundary-array index for each segment
    long * vol_ids,              // owning volume index for each segment
    long * edge_ids,             // local edge index (0..2) within the volume
    double * boundary_values,    // out: values at the boundary
    double *edge_values)         // in:  per-volume edge values (3 per volume)
{
    const int k =
        threadIdx.x+threadIdx.y*blockDim.x+
        (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    if (k >= N)
        return;
    // Use long for the gathered indices: the index arrays are long, and the
    // original narrowed them into int locals (corrupting ids above INT_MAX).
    // This also matches evaluate_segment_reflective.
    long id      = ids[k],
         id_vol  = vol_ids[k],
         id_edge = edge_ids[k];
    // Each volume stores its 3 edge values contiguously.
    boundary_values[id] = edge_values[id_vol*3 + id_edge];
}
// Dirichlet boundary condition, step 2: write the constant boundary
// quantity q_bdry into every listed boundary location.
//
// Launch: flat 1D thread index over a possibly-2D grid/block; one thread
// per segment, tail threads (k >= N) return immediately.
__global__ void evaluate_segment_dirichlet_2(
    int N,                       // number of segments
    double q_bdry,               // constant value to impose on the boundary
    long * ids,                  // boundary-array index for each segment
    double * boundary_values)    // out: values at the boundary
{
    const int k =
        threadIdx.x+threadIdx.y*blockDim.x+
        (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    // Bounds check BEFORE touching ids[]: the original loaded ids[k] first,
    // which is an out-of-bounds read for tail threads with k >= N.
    if (k >= N)
        return;
    long id = ids[k];  // long, matching the element type of ids
    boundary_values[id] = q_bdry;
}
|
46c409334f849e4689636b5aaaf1337086ac5fec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_b2 [7][1];
static int dims_update_halo_kernel1_b2_h [7][1] = {0};
//user function
// User kernel body: apply the bottom (b2) halo update to one cell.
// For each field whose flag is set in `fields`, copy the value from the
// cell 3 rows up, offset (0,3), into the current halo cell (0,0).
__device__
inline void update_halo_kernel1_b2_gpu(ACC<double> &density0,
  ACC<double> &energy0,
  ACC<double> &energy1,
  ACC<double> &u,
  ACC<double> &p,
  ACC<double> &sd,
  const int* fields) {
  if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(0,3);
  if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(0,3);
  if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(0,3);
  if(fields[FIELD_U] == 1) u(0,0) = u(0,3);
  if(fields[FIELD_P] == 1) p(0,0) = p(0,3);
  if(fields[FIELD_SD] == 1) sd(0,0) = sd(0,3);
}
// GPU wrapper (auto-generated by ops.py): one thread per cell of the
// (size0 x size1) iteration range. Each raw pointer is advanced to this
// thread's cell (x stride 1, y stride = the dat's row width from the
// __constant__ dims table), wrapped in an ACC accessor, and passed to the
// user kernel. Tail threads outside the range do nothing.
__global__ void ops_update_halo_kernel1_b2(
  double* __restrict arg0,
  double* __restrict arg1,
  double* __restrict arg2,
  double* __restrict arg3,
  double* __restrict arg4,
  double* __restrict arg5,
  const int* __restrict arg6,
  int size0,
  int size1 ){

  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  // Offset each dat pointer to this thread's cell.
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[0][0];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[1][0];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[2][0];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[3][0];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[4][0];
  arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[5][0];

  // Guard the grid tail (grid is rounded up by the host stub).
  if (idx_x < size0 && idx_y < size1) {
    ACC<double> argp0(dims_update_halo_kernel1_b2[0][0], arg0);
    ACC<double> argp1(dims_update_halo_kernel1_b2[1][0], arg1);
    ACC<double> argp2(dims_update_halo_kernel1_b2[2][0], arg2);
    ACC<double> argp3(dims_update_halo_kernel1_b2[3][0], arg3);
    ACC<double> argp4(dims_update_halo_kernel1_b2[4][0], arg4);
    ACC<double> argp5(dims_update_halo_kernel1_b2[5][0], arg5);
    update_halo_kernel1_b2_gpu(argp0, argp1, argp2, argp3,
                               argp4, argp5, arg6);
  }
}
// host stub function
#ifndef OPS_LAZY
// Host stub for the update_halo_kernel1_b2 parallel loop
// (auto-generated by ops.py; fix the generator, not this file).
// Computes the local iteration range, refreshes the device-side dims
// constants, stages the global `fields` array on the device, computes
// per-dat base pointers, and launches the GPU wrapper kernel.
void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
// Lazy-execution entry point: the same body, driven by a previously
// enqueued kernel descriptor.
void ops_par_loop_update_halo_kernel1_b2_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
#endif

  //Timing
  double t1,t2,c1,c2;

  ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};

  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,7,range,49)) return;
  #endif

  if (OPS_diags > 1) {
    // Kernel index 49 identifies this loop in the OPS timing tables.
    ops_timing_realloc(49,"update_halo_kernel1_b2");
    OPS_kernels[49].count++;
    ops_timers_core(&c1,&t1);
  }

  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI

  #ifdef OPS_MPI
  int arg_idx[2];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif

  // Push each dat's row width (x-dim) to __constant__ memory, but only
  // when it changed since the previous launch.
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  int xdim5 = args[5].dat->size[0];
  if (xdim0 != dims_update_halo_kernel1_b2_h[0][0] || xdim1 != dims_update_halo_kernel1_b2_h[1][0] || xdim2 != dims_update_halo_kernel1_b2_h[2][0] || xdim3 != dims_update_halo_kernel1_b2_h[3][0] || xdim4 != dims_update_halo_kernel1_b2_h[4][0] || xdim5 != dims_update_halo_kernel1_b2_h[5][0]) {
    dims_update_halo_kernel1_b2_h[0][0] = xdim0;
    dims_update_halo_kernel1_b2_h[1][0] = xdim1;
    dims_update_halo_kernel1_b2_h[2][0] = xdim2;
    dims_update_halo_kernel1_b2_h[3][0] = xdim3;
    dims_update_halo_kernel1_b2_h[4][0] = xdim4;
    dims_update_halo_kernel1_b2_h[5][0] = xdim5;
    cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_b2, dims_update_halo_kernel1_b2_h, sizeof(dims_update_halo_kernel1_b2)));
  }

  int *arg6h = (int *)arg6.data;

  // Launch geometry: one thread per cell, grid rounded up.
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);

  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);

  // Stage the read-only `fields` flag array into the pooled consts buffer
  // and copy it to the device.
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg6.data = OPS_consts_h + consts_bytes;
  arg6.data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  // Per-element byte strides (SoA uses the component size, AoS the full
  // element size).
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);

  char *p_a[7];

  //set up initial pointers
  // Offset each dat's device pointer to the first cell of the local range
  // (x offset, then y offset scaled by the row width).
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset +
           dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset +
           dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    (start[1] * args[5].stencil->stride[1]);
  p_a[5] = (char *)args[5].data_d + base5;

  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 7);
  ops_halo_exchanges(args,7,range);
  #endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[49].mpi_time += t2-t1;
  }

  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0)
    hipLaunchKernelGGL(( ops_update_halo_kernel1_b2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
         (double *)p_a[2], (double *)p_a[3],
         (double *)p_a[4], (double *)p_a[5],
         (int *)arg6.data_d,x_size, y_size);

  cutilSafeCall(hipGetLastError());

  if (OPS_diags>1) {
    // Synchronize only when diagnostics are on, so the kernel can be timed.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[49].time += t1-t2;
  }

  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 7);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  #endif

  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[49].mpi_time += t2-t1;
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg5);
  }
}
#ifdef OPS_LAZY
// OPS_LAZY variant: instead of executing immediately, build a kernel
// descriptor (args, range, and a hash identifying this invocation) and
// enqueue it for deferred execution.
void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 49;
  // djb2-style hash over the kernel index, the range, and the dat indices.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 49;
  for ( int i=0; i<4; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 7;
  desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  // Deep-copy arg6's data buffer so the descriptor owns its own copy and
  // the caller's array can be reused before the deferred run.
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
  desc->args[6].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_b2_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(49,"update_halo_kernel1_b2");
  }
  ops_enqueue_kernel(desc);
}
#endif
| 46c409334f849e4689636b5aaaf1337086ac5fec.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_b2 [7][1];
static int dims_update_halo_kernel1_b2_h [7][1] = {0};
//user function
// User kernel body: apply the bottom (b2) halo update to one cell.
// For each field whose flag is set in `fields`, copy the value from the
// cell 3 rows up, offset (0,3), into the current halo cell (0,0).
__device__
inline void update_halo_kernel1_b2_gpu(ACC<double> &density0,
  ACC<double> &energy0,
  ACC<double> &energy1,
  ACC<double> &u,
  ACC<double> &p,
  ACC<double> &sd,
  const int* fields) {
  if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(0,3);
  if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(0,3);
  if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(0,3);
  if(fields[FIELD_U] == 1) u(0,0) = u(0,3);
  if(fields[FIELD_P] == 1) p(0,0) = p(0,3);
  if(fields[FIELD_SD] == 1) sd(0,0) = sd(0,3);
}
// GPU wrapper (auto-generated by ops.py): one thread per cell of the
// (size0 x size1) iteration range. Each raw pointer is advanced to this
// thread's cell (x stride 1, y stride = the dat's row width from the
// __constant__ dims table), wrapped in an ACC accessor, and passed to the
// user kernel. Tail threads outside the range do nothing.
__global__ void ops_update_halo_kernel1_b2(
  double* __restrict arg0,
  double* __restrict arg1,
  double* __restrict arg2,
  double* __restrict arg3,
  double* __restrict arg4,
  double* __restrict arg5,
  const int* __restrict arg6,
  int size0,
  int size1 ){

  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  // Offset each dat pointer to this thread's cell.
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[0][0];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[1][0];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[2][0];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[3][0];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[4][0];
  arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_b2[5][0];

  // Guard the grid tail (grid is rounded up by the host stub).
  if (idx_x < size0 && idx_y < size1) {
    ACC<double> argp0(dims_update_halo_kernel1_b2[0][0], arg0);
    ACC<double> argp1(dims_update_halo_kernel1_b2[1][0], arg1);
    ACC<double> argp2(dims_update_halo_kernel1_b2[2][0], arg2);
    ACC<double> argp3(dims_update_halo_kernel1_b2[3][0], arg3);
    ACC<double> argp4(dims_update_halo_kernel1_b2[4][0], arg4);
    ACC<double> argp5(dims_update_halo_kernel1_b2[5][0], arg5);
    update_halo_kernel1_b2_gpu(argp0, argp1, argp2, argp3,
                               argp4, argp5, arg6);
  }
}
// host stub function
#ifndef OPS_LAZY
// Host stub for the update_halo_kernel1_b2 parallel loop
// (auto-generated by ops.py; fix the generator, not this file).
// Computes the local iteration range, refreshes the device-side dims
// constants, stages the global `fields` array on the device, computes
// per-dat base pointers, and launches the GPU wrapper kernel.
void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
// Lazy-execution entry point: the same body, driven by a previously
// enqueued kernel descriptor.
void ops_par_loop_update_halo_kernel1_b2_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
#endif

  //Timing
  double t1,t2,c1,c2;

  ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};

  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,7,range,49)) return;
  #endif

  if (OPS_diags > 1) {
    // Kernel index 49 identifies this loop in the OPS timing tables.
    ops_timing_realloc(49,"update_halo_kernel1_b2");
    OPS_kernels[49].count++;
    ops_timers_core(&c1,&t1);
  }

  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI

  #ifdef OPS_MPI
  int arg_idx[2];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif

  // Push each dat's row width (x-dim) to __constant__ memory, but only
  // when it changed since the previous launch.
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  int xdim5 = args[5].dat->size[0];
  if (xdim0 != dims_update_halo_kernel1_b2_h[0][0] || xdim1 != dims_update_halo_kernel1_b2_h[1][0] || xdim2 != dims_update_halo_kernel1_b2_h[2][0] || xdim3 != dims_update_halo_kernel1_b2_h[3][0] || xdim4 != dims_update_halo_kernel1_b2_h[4][0] || xdim5 != dims_update_halo_kernel1_b2_h[5][0]) {
    dims_update_halo_kernel1_b2_h[0][0] = xdim0;
    dims_update_halo_kernel1_b2_h[1][0] = xdim1;
    dims_update_halo_kernel1_b2_h[2][0] = xdim2;
    dims_update_halo_kernel1_b2_h[3][0] = xdim3;
    dims_update_halo_kernel1_b2_h[4][0] = xdim4;
    dims_update_halo_kernel1_b2_h[5][0] = xdim5;
    cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_b2, dims_update_halo_kernel1_b2_h, sizeof(dims_update_halo_kernel1_b2)));
  }

  int *arg6h = (int *)arg6.data;

  // Launch geometry: one thread per cell, grid rounded up.
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);

  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);

  // Stage the read-only `fields` flag array into the pooled consts buffer
  // and copy it to the device.
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg6.data = OPS_consts_h + consts_bytes;
  arg6.data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  // Per-element byte strides (SoA uses the component size, AoS the full
  // element size).
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);

  char *p_a[7];

  //set up initial pointers
  // Offset each dat's device pointer to the first cell of the local range
  // (x offset, then y offset scaled by the row width).
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset +
           dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset +
           dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    (start[1] * args[5].stencil->stride[1]);
  p_a[5] = (char *)args[5].data_d + base5;

  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 7);
  ops_halo_exchanges(args,7,range);
  #endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[49].mpi_time += t2-t1;
  }

  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0)
    ops_update_halo_kernel1_b2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
         (double *)p_a[2], (double *)p_a[3],
         (double *)p_a[4], (double *)p_a[5],
         (int *)arg6.data_d,x_size, y_size);

  cutilSafeCall(cudaGetLastError());

  if (OPS_diags>1) {
    // Synchronize only when diagnostics are on, so the kernel can be timed.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[49].time += t1-t2;
  }

  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 7);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  #endif

  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[49].mpi_time += t2-t1;
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[49].transfer += ops_compute_transfer(dim, start, end, &arg5);
  }
}
#ifdef OPS_LAZY
// OPS_LAZY variant: instead of executing immediately, build a kernel
// descriptor (args, range, and a hash identifying this invocation) and
// enqueue it for deferred execution.
void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 49;
  // djb2-style hash over the kernel index, the range, and the dat indices.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 49;
  for ( int i=0; i<4; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 7;
  desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  // Deep-copy arg6's data buffer so the descriptor owns its own copy and
  // the caller's array can be reused before the deferred run.
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
  desc->args[6].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_b2_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(49,"update_halo_kernel1_b2");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
50c28e4dfecfacfa248064dfdd5d72552bcc62b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
//#include <atlimage.h>
enum color_transform_t
{
grayscale,
sRGB,
LAB
};
enum transform_t
{
Gaussian
};
#define SIZE 1000
//typedef struct
//{
// int r;
// int g;
// int b;
//} rgb_t;
//
//typedef rgb_t* rgb_ptr;
//typedef rgb_ptr* rgb_list;
//typedef rgb_list* rgb_mat;
//
hipError_t transform(uchar3 *dst_img, uchar3 *src_img, int img_size, int block_size, int grid_size, color_transform_t type);
hipError_t transform();
// convert one scanline to grayscale in parallel
// Convert one pixel to grayscale by averaging its three channels.
// The flat index is y * SIZE + x, so the kernel works for the current 1D
// launch (y == 0, x spans all pixels) as well as a 2D launch over a
// SIZE x SIZE image. img_size is the buffer size in BYTES (unused here);
// the guard uses the pixel count instead.
__global__ void grayscale_transform(uchar3 *dst_img, uchar3 *src_img, int img_size)
{
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int idx = y * SIZE + x;
	// Bounds guard: the original had none and silently relied on the
	// caller launching exactly SIZE*SIZE threads.
	if (idx >= (unsigned int)(SIZE * SIZE))
		return;
	uchar3 rgb = src_img[idx];
	int average = (rgb.x + rgb.y + rgb.z) / 3;
	dst_img[idx].x = average;
	dst_img[idx].y = average;
	dst_img[idx].z = average;
}
// CPU reference: replace every pixel of the SIZE*SIZE image with the
// average of its three channels. The img_size argument (bytes) is kept
// for signature parity with the GPU path but is not used here.
void host_grayscale(uchar3 *dst_img, uchar3 *src_img, int img_size)
{
	const int pixelCount = SIZE * SIZE;
	for (int idx = 0; idx < pixelCount; idx++)
	{
		const uchar3 pixel = src_img[idx];
		const int luma = (pixel.x + pixel.y + pixel.z) / 3;
		dst_img[idx].x = luma;
		dst_img[idx].y = luma;
		dst_img[idx].z = luma;
	}
}
// Build a dummy SIZE x SIZE RGB image, run the GPU grayscale transform,
// then time the equivalent CPU implementation for comparison.
int main()
{
	int size = SIZE * SIZE;                 // pixel count
	int img_size = size * sizeof(uchar3);   // buffer size in bytes
	int block_size = size / SIZE;           // SIZE threads per block
	int grid_size = size / block_size;      // SIZE blocks
	uchar3 *src_img, *gray_img;
	src_img = (uchar3*)malloc(img_size);
	gray_img = (uchar3*)malloc(img_size);
	for (int i = 0; i < SIZE * SIZE; i++)
	{
		uchar3 src, gray;
		src.x = 128;
		src.y = 64;
		// Bug fix: the original wrote "src.x = 256" here, which both
		// overwrote the red channel and overflowed the byte (256 -> 0),
		// leaving the blue channel uninitialized.
		src.z = 255;
		gray.x = 0;
		gray.y = 0;
		gray.z = 0;
		src_img[i] = src;
		gray_img[i] = gray;
	}
	hipError_t cudaStatus = transform(gray_img, src_img, img_size, block_size, grid_size, grayscale);
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "addWithCuda failed!");
		free(gray_img);   // don't leak on the error path
		free(src_img);
		return 1;
	}
	cudaStatus = hipDeviceReset();
	if (cudaStatus != hipSuccess)
	{
		fprintf(stderr, "cudadevicereset failed!");
		free(gray_img);
		free(src_img);
		return 1;
	}
	// Time the CPU reference implementation.
	clock_t begin = clock();
	host_grayscale(gray_img, src_img, img_size);
	clock_t end = clock();
	double time_spent = 1000 * (double)(end - begin) / CLOCKS_PER_SEC;
	printf("CPU Execution Time: %32fms", time_spent);
	free(gray_img);
	free(src_img);
	// The unreachable duplicated 'system("pause"); return 0;' tail from
	// the original has been removed.
	return 0;
}
// transform an image
hipError_t transform(uchar3 *dst_img, uchar3 *src_img, int img_size, int block_size, int grid_size, color_transform_t type)
{
hipError_t cudaStatus;
uchar3 *t_src, *gpu_output;
cudaStatus = hipMalloc((void**)&t_src, img_size);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMalloc((void**)&gpu_output, img_size);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMalloc failed!");
cudaStatus = hipMemcpy(t_src, src_img, img_size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
float et;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
if (type == grayscale)
hipLaunchKernelGGL(( grayscale_transform), dim3(grid_size), dim3(block_size), 0, 0, gpu_output, t_src, img_size);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&et, start, stop);
printf("GPU Execution Time: %32fms\n", et);
//// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(dst_img, gpu_output, img_size, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//hipError_t transform(rgb_mat dst_img, rgb_mat src_img, unsigned int img_size, color_transform_t type);
//hipError_t transform(rgb_t ***image, unsigned int line, transform_t type);
//
//// convert one scanline to grayscale in parallel
//__global__ void grayscale_transform(rgb_list gpu_output, rgb_mat t_src, unsigned int line)
//{
// unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned int idx = y * 100 + x;
//
// rgb_ptr src_rgb = t_src[y][x];
//
// rgb_ptr gpu_rgb = nullptr;
// gpu_rgb->r = 0;
// gpu_rgb->g = 0;
// gpu_rgb->b = 0;
//
// double average = (src_rgb->r + src_rgb->g + src_rgb->b) / 3;
//
// gpu_rgb->r = average;
// gpu_rgb->g = average;
// gpu_rgb->b = average;
//
// gpu_output[idx] = gpu_rgb;
//}
//
//int main()
//{
// // genreate a dummy image
// int img_size = 100 * 100 * sizeof(rgb_ptr);
// int block_size = 4;
// int grid_size = 100000 / block_size;
//
// rgb_mat src_img = (rgb_list*)malloc(100 * sizeof(rgb_list));
// rgb_mat gray_img = (rgb_list*)malloc(100 * sizeof(rgb_list));
//
// for (int y = 0; y < 100; y++)
// {
// src_img[y] = (rgb_list)malloc(100 * sizeof(rgb_ptr));
// gray_img[y] = (rgb_list)malloc(100 * sizeof(rgb_ptr));
// for (int x = 0; x < 100; x++)
// {
// src_img[y][x] = (rgb_ptr)malloc(sizeof(rgb_ptr));
// gray_img[y][x] = (rgb_ptr)malloc(sizeof(rgb_ptr));
// }
// }
//
//
// int i, j;
// for (i = 0; i < 100; i++)
// {
// for (j = 0; j < 100; j++)
// {
// rgb_ptr s_p = src_img[i][j];
// s_p->r = 128.0;
// s_p->g = 76.0;
// s_p->b = 256.0;
// src_img[i][j] = s_p;
//
// rgb_ptr g_p = gray_img[i][j];
// g_p->r = 0.0;
// g_p->g = 0.0;
// g_p->b = 0.0;
// gray_img[i][j] = g_p;
// }
// }
//
// hipError_t cudaStatus = transform(gray_img, src_img, img_size, block_size, grid_size, grayscale);
// if (cudaStatus != hipSuccess)
// {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// //cudaStatus = hipDeviceReset();
// //if (cudaStatus != hipSuccess)
// //{
// // fprintf(stderr, "cudadevicereset failed!");
// // return 1;
// //}
//
// for (i = 0; i < 100; i++)
// {
// for (int j = 0; j < 100; j++)
// {
// rgb_ptr rgb = gray_img[i][j];
// printf("%.3f : %.3f : %.3f\n", rgb->r, rgb->g, rgb->b);
// }
// }
//
// free(gray_img);
// free(src_img);
//
// return 0;
// system("pause");
//}
//
//// transform an image
//hipError_t transform(rgb_mat dst_img, rgb_mat src_img, unsigned int img_size, int block_size, int grid_size, color_transform_t type)
//{
// rgb_mat t_src;
// rgb_list gpu_output;
//
// hipError_t cudaStatus;
//
// cudaStatus = hipMalloc((void**)&t_src, img_size);
// if (cudaStatus != hipSuccess)
// fprintf(stderr, "hipMalloc failed!");
//
// cudaStatus = hipMalloc((void**)&gpu_output, img_size);
// if (cudaStatus != hipSuccess)
// fprintf(stderr, "hipMalloc failed!");
//
// cudaStatus = hipMemcpy(t_src, src_img, img_size, hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess)
// fprintf(stderr, "hipMemcpy failed!");
//
// if (type == grayscale)
// hipLaunchKernelGGL(( grayscale_transform), dim3(grid_size), dim3(block_size), 0, 0, gpu_output, t_src, img_size);
//
// //// Check for any errors launching the kernel
// cudaStatus = hipGetLastError();
// if (cudaStatus != hipSuccess)
// fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
//
// // hipDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess)
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//
// //// Copy output vector from GPU buffer to host memory.
// cudaStatus = hipMemcpy(dst_img, gpu_output, img_size, hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess)
// fprintf(stderr, "hipMemcpy failed!");
//
// //cudaStatus = hipFree(gpu_output);
// //if (cudaStatus != hipSuccess)
// // fprintf(stderr, "hipFree failed!");
//
// return cudaStatus;
//}
/// HERE IS A WORKING EXAMPLE
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // hipDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = hipDeviceReset();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// hipError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = hipSetDevice(0);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = hipGetLastError();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
// goto Error;
// }
//
// // hipDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
//Error:
// hipFree(dev_c);
// hipFree(dev_a);
// hipFree(dev_b);
//
// return cudaStatus;
//}
| 50c28e4dfecfacfa248064dfdd5d72552bcc62b2.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
//#include <atlimage.h>
enum color_transform_t
{
grayscale,
sRGB,
LAB
};
enum transform_t
{
Gaussian
};
#define SIZE 1000
//typedef struct
//{
// int r;
// int g;
// int b;
//} rgb_t;
//
//typedef rgb_t* rgb_ptr;
//typedef rgb_ptr* rgb_list;
//typedef rgb_list* rgb_mat;
//
cudaError_t transform(uchar3 *dst_img, uchar3 *src_img, int img_size, int block_size, int grid_size, color_transform_t type);
cudaError_t transform();
// convert one scanline to grayscale in parallel
__global__ void grayscale_transform(uchar3 *dst_img, uchar3 *src_img, int img_size)
{
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idx = y * SIZE + x;
uchar3 rgb = src_img[idx];
int average = (rgb.x + rgb.y + rgb.z) / 3;
dst_img[idx].x = average;
dst_img[idx].y = average;
dst_img[idx].z = average;
}
void host_grayscale(uchar3 *dst_img, uchar3 *src_img, int img_size)
{
for (int i = 0; i < SIZE * SIZE; i++)
{
uchar3 rgb = src_img[i];
int average = (rgb.x + rgb.y + rgb.z) / 3;
dst_img[i].x = average;
dst_img[i].y = average;
dst_img[i].z = average;
}
}
int main()
{
// genreate a dummy image
int size = SIZE * SIZE;
int img_size = size * sizeof(uchar3);
int block_size = size / SIZE;
int grid_size = size / block_size;
//CImage img;
uchar3 *src_img, *gray_img, srgb;
src_img = (uchar3*)malloc(img_size);
gray_img = (uchar3*)malloc(img_size);
for (int i = 0; i < SIZE * SIZE; i++)
{
uchar3 src, gray;
src.x = 128;
src.y = 64;
src.x = 256;
gray.x = 0;
gray.y = 0;
gray.z = 0;
src_img[i] = src;
gray_img[i] = gray;
}
cudaError_t cudaStatus = transform(gray_img, src_img, img_size, block_size, grid_size, grayscale);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudadevicereset failed!");
return 1;
}
clock_t begin = clock();
host_grayscale(gray_img, src_img, img_size);
clock_t end = clock();
double time_spent = 1000 * (double)(end - begin) / CLOCKS_PER_SEC;
printf("CPU Execution Time: %32fms", time_spent);
free(gray_img);
free(src_img);
return 0;
system("pause");
return 0;
system("pause");
}
// transform an image
cudaError_t transform(uchar3 *dst_img, uchar3 *src_img, int img_size, int block_size, int grid_size, color_transform_t type)
{
cudaError_t cudaStatus;
uchar3 *t_src, *gpu_output;
cudaStatus = cudaMalloc((void**)&t_src, img_size);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMalloc((void**)&gpu_output, img_size);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMemcpy(t_src, src_img, img_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
float et;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
if (type == grayscale)
grayscale_transform<<<grid_size, block_size>>>(gpu_output, t_src, img_size);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
printf("GPU Execution Time: %32fms\n", et);
//// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(dst_img, gpu_output, img_size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//cudaError_t transform(rgb_mat dst_img, rgb_mat src_img, unsigned int img_size, color_transform_t type);
//cudaError_t transform(rgb_t ***image, unsigned int line, transform_t type);
//
//// convert one scanline to grayscale in parallel
//__global__ void grayscale_transform(rgb_list gpu_output, rgb_mat t_src, unsigned int line)
//{
// unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned int idx = y * 100 + x;
//
// rgb_ptr src_rgb = t_src[y][x];
//
// rgb_ptr gpu_rgb = nullptr;
// gpu_rgb->r = 0;
// gpu_rgb->g = 0;
// gpu_rgb->b = 0;
//
// double average = (src_rgb->r + src_rgb->g + src_rgb->b) / 3;
//
// gpu_rgb->r = average;
// gpu_rgb->g = average;
// gpu_rgb->b = average;
//
// gpu_output[idx] = gpu_rgb;
//}
//
//int main()
//{
// // genreate a dummy image
// int img_size = 100 * 100 * sizeof(rgb_ptr);
// int block_size = 4;
// int grid_size = 100000 / block_size;
//
// rgb_mat src_img = (rgb_list*)malloc(100 * sizeof(rgb_list));
// rgb_mat gray_img = (rgb_list*)malloc(100 * sizeof(rgb_list));
//
// for (int y = 0; y < 100; y++)
// {
// src_img[y] = (rgb_list)malloc(100 * sizeof(rgb_ptr));
// gray_img[y] = (rgb_list)malloc(100 * sizeof(rgb_ptr));
// for (int x = 0; x < 100; x++)
// {
// src_img[y][x] = (rgb_ptr)malloc(sizeof(rgb_ptr));
// gray_img[y][x] = (rgb_ptr)malloc(sizeof(rgb_ptr));
// }
// }
//
//
// int i, j;
// for (i = 0; i < 100; i++)
// {
// for (j = 0; j < 100; j++)
// {
// rgb_ptr s_p = src_img[i][j];
// s_p->r = 128.0;
// s_p->g = 76.0;
// s_p->b = 256.0;
// src_img[i][j] = s_p;
//
// rgb_ptr g_p = gray_img[i][j];
// g_p->r = 0.0;
// g_p->g = 0.0;
// g_p->b = 0.0;
// gray_img[i][j] = g_p;
// }
// }
//
// cudaError_t cudaStatus = transform(gray_img, src_img, img_size, block_size, grid_size, grayscale);
// if (cudaStatus != cudaSuccess)
// {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// //cudaStatus = cudaDeviceReset();
// //if (cudaStatus != cudaSuccess)
// //{
// // fprintf(stderr, "cudadevicereset failed!");
// // return 1;
// //}
//
// for (i = 0; i < 100; i++)
// {
// for (int j = 0; j < 100; j++)
// {
// rgb_ptr rgb = gray_img[i][j];
// printf("%.3f : %.3f : %.3f\n", rgb->r, rgb->g, rgb->b);
// }
// }
//
// free(gray_img);
// free(src_img);
//
// return 0;
// system("pause");
//}
//
//// transform an image
//cudaError_t transform(rgb_mat dst_img, rgb_mat src_img, unsigned int img_size, int block_size, int grid_size, color_transform_t type)
//{
// rgb_mat t_src;
// rgb_list gpu_output;
//
// cudaError_t cudaStatus;
//
// cudaStatus = cudaMalloc((void**)&t_src, img_size);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMalloc failed!");
//
// cudaStatus = cudaMalloc((void**)&gpu_output, img_size);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMalloc failed!");
//
// cudaStatus = cudaMemcpy(t_src, src_img, img_size, cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMemcpy failed!");
//
// if (type == grayscale)
// grayscale_transform<<<grid_size, block_size>>>(gpu_output, t_src, img_size);
//
// //// Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//
// //// Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(dst_img, gpu_output, img_size, cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMemcpy failed!");
//
// //cudaStatus = cudaFree(gpu_output);
// //if (cudaStatus != cudaSuccess)
// // fprintf(stderr, "cudaFree failed!");
//
// return cudaStatus;
//}
/// HERE IS A WORKING EXAMPLE
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // cudaDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = cudaDeviceReset();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// cudaError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
//
// return cudaStatus;
//}
|
101d5e1afa8848425654c1a38c127a1f7bdd4b96.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//macros
#define MAX_FILENAME_SIZE 256
#define MAX_TEST_SIZE 1000
//solve the RNA prediction problem
hipError_t solverRNA(const char *,int *);
__device__ bool canPair(int base1,int base2) {
bool case1, case2;
case1 = (base1 == 67 && base2 == 71 ) || (base1 == 71 && base2 == 67);
case2 = (base1 == 65 && base2 == 85) || (base1 == 85 && base2 == 65);
return (case1||case2);
}
__global__ void solverKernel(int *dev_data,int*dev_memo,int size)
{
int i,j,opt;
i = threadIdx.x;
for(int k = 5 ; k < size ;k++){
if(i<size-k){
j = i + k;
dev_memo[size*i + j] = dev_memo[size*i + (j - 1)];
for (int t = i; t < j - 4; t++) { //opt(i,j)=max(opt(i,j-1),1+opt(i,t-1)+opt(t+1,j-1))
if (canPair(dev_data[t], dev_data[j])) {
if (t == 0) {
opt = 1 + dev_memo[size*(t + 1)+(j-1)];
}
else {
opt = 1 + dev_memo[i*size+(t-1)] + dev_memo[size*(t+1)+(j-1)];
}
if (opt > dev_memo[size*i+j]) {
dev_memo[i*size+j] = opt;
}
}
}
}
__syncthreads();
}
}
int main()
{
FILE *input;
char *filename;
char testRNA[MAX_TEST_SIZE];
int result;
hipError_t cudaStatus;
//Memory Allocation to file name
filename = (char*)malloc(MAX_FILENAME_SIZE*sizeof(char));
//Reading filename
printf("Write name of input file : ");
scanf("%s", filename);
//Open File to read input test data
input = fopen(filename, "r");
//Testing input opening
if (input == NULL) {
printf("Error opening file, please try again.");
return 1;
}
printf("\n\n---------------- Begin Tests --------------------\n\n");
//Begin reading file and testing
while (fscanf(input, "%s",testRNA)!=EOF) {
//launch solverRNA
cudaStatus = solverRNA(testRNA,&result);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "solverRNA failed!");
return 1;
}
printf("%s : ", testRNA);
printf("%d base pairs.\n",result);
}
printf("\n\n---------------- Ending Tests --------------------\n\n");
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
system("pause");
return 0;
}
// Helper function for using CUDA to solve RNA prediction in parallel with function objective maximum number of bases
hipError_t solverRNA(const char *data,int *result)
{
int *dev_data = 0;//data in device
int *dev_memo = 0;//memotable in device
int *host_memo = 0;//memotable in host
int *host_data = 0;
int size = strlen(data);
const int size_memo = size*size;
hipError_t cudaStatus;
//convert string to array of integers
host_data = (int*)malloc(size*sizeof(int));
for(int i = 0;i < size ;++i) host_data[i]=(int)data[i];
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate CPU buffer to memoTable
host_memo = (int *)calloc(size_memo,sizeof(int));
// Allocate GPU buffer to memoTable
cudaStatus = hipMalloc((void**)&dev_data, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_memo, size_memo*sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_memo, host_memo , size_memo * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_data, host_data, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( solverKernel) , dim3(1), dim3(size) , 0, 0, dev_data, dev_memo, size);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "solverKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching solverKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(host_memo, dev_memo, size_memo*sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
*result=host_memo[size-1];
Error:
hipFree(dev_memo);
hipFree(dev_data);
free(host_data);
free(host_memo);
return cudaStatus;
}
| 101d5e1afa8848425654c1a38c127a1f7bdd4b96.cu |
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//macros
#define MAX_FILENAME_SIZE 256
#define MAX_TEST_SIZE 1000
//solve the RNA prediction problem
cudaError_t solverRNA(const char *,int *);
__device__ bool canPair(int base1,int base2) {
bool case1, case2;
case1 = (base1 == 67 && base2 == 71 ) || (base1 == 71 && base2 == 67);
case2 = (base1 == 65 && base2 == 85) || (base1 == 85 && base2 == 65);
return (case1||case2);
}
__global__ void solverKernel(int *dev_data,int*dev_memo,int size)
{
int i,j,opt;
i = threadIdx.x;
for(int k = 5 ; k < size ;k++){
if(i<size-k){
j = i + k;
dev_memo[size*i + j] = dev_memo[size*i + (j - 1)];
for (int t = i; t < j - 4; t++) { //opt(i,j)=max(opt(i,j-1),1+opt(i,t-1)+opt(t+1,j-1))
if (canPair(dev_data[t], dev_data[j])) {
if (t == 0) {
opt = 1 + dev_memo[size*(t + 1)+(j-1)];
}
else {
opt = 1 + dev_memo[i*size+(t-1)] + dev_memo[size*(t+1)+(j-1)];
}
if (opt > dev_memo[size*i+j]) {
dev_memo[i*size+j] = opt;
}
}
}
}
__syncthreads();
}
}
int main()
{
FILE *input;
char *filename;
char testRNA[MAX_TEST_SIZE];
int result;
cudaError_t cudaStatus;
//Memory Allocation to file name
filename = (char*)malloc(MAX_FILENAME_SIZE*sizeof(char));
//Reading filename
printf("Write name of input file : ");
scanf("%s", filename);
//Open File to read input test data
input = fopen(filename, "r");
//Testing input opening
if (input == NULL) {
printf("Error opening file, please try again.");
return 1;
}
printf("\n\n---------------- Begin Tests --------------------\n\n");
//Begin reading file and testing
while (fscanf(input, "%s",testRNA)!=EOF) {
//launch solverRNA
cudaStatus = solverRNA(testRNA,&result);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "solverRNA failed!");
return 1;
}
printf("%s : ", testRNA);
printf("%d base pairs.\n",result);
}
printf("\n\n---------------- Ending Tests --------------------\n\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
system("pause");
return 0;
}
// Helper function for using CUDA to solve RNA prediction in parallel with function objective maximum number of bases
cudaError_t solverRNA(const char *data,int *result)
{
int *dev_data = 0;//data in device
int *dev_memo = 0;//memotable in device
int *host_memo = 0;//memotable in host
int *host_data = 0;
int size = strlen(data);
const int size_memo = size*size;
cudaError_t cudaStatus;
//convert string to array of integers
host_data = (int*)malloc(size*sizeof(int));
for(int i = 0;i < size ;++i) host_data[i]=(int)data[i];
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate CPU buffer to memoTable
host_memo = (int *)calloc(size_memo,sizeof(int));
// Allocate GPU buffer to memoTable
cudaStatus = cudaMalloc((void**)&dev_data, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_memo, size_memo*sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_memo, host_memo , size_memo * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_data, host_data, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
solverKernel <<< 1, size >>> (dev_data, dev_memo, size);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "solverKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching solverKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(host_memo, dev_memo, size_memo*sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
*result=host_memo[size-1];
Error:
cudaFree(dev_memo);
cudaFree(dev_data);
free(host_data);
free(host_memo);
return cudaStatus;
}
|
3588b5c728ad981c5a4701b8e9921da146fffc3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
/*
 * Print an error message to stderr and terminate the process.
 * Bug fix: the original returned to the caller, so "fatal" conditions
 * (failed malloc, truncated input file) were silently ignored and the
 * program kept running on bad state.  Callers clearly expect
 * termination, so we exit with a failure status.
 */
void
fatal(char *s)
{
    fprintf(stderr, "error: %s\n", s);
    exit(1);  /* bug fix: actually be fatal */
}
/*
 * Write a grid_rows x grid_cols matrix to a text file, one
 * "<linear-index>\t<value>\n" record per cell, in row-major order.
 * Bug fix: on fopen failure the original printed a message and then
 * wrote through a NULL FILE* (undefined behavior); we now return early.
 * The sprintf+fputs pair is replaced by an equivalent fprintf that
 * produces byte-identical output.
 */
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
    int i, j, index = 0;
    FILE *fp;
    if ((fp = fopen(file, "w")) == 0) {
        printf("The file was not opened\n");
        return;  /* bug fix: do not write to a NULL stream */
    }
    for (i = 0; i < grid_rows; i++)
        for (j = 0; j < grid_cols; j++) {
            fprintf(fp, "%d\t%g\n", index, vect[i * grid_cols + j]);
            index++;
        }
    fclose(fp);
}
/*
 * Read grid_rows*grid_cols float values (one per text line) into vect,
 * row-major.
 * Bug fixes: a failed fopen previously fell straight into
 * fgets(NULL) (undefined behavior) -- now reported and returned from;
 * a short file is detected via the fgets return value before the stale
 * buffer is parsed, and the stream is closed on that path.
 */
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
    int i, j;
    FILE *fp;
    char str[STR_SIZE];
    float val;
    if ((fp = fopen(file, "r")) == 0) {
        printf("The file was not opened\n");
        return;  /* bug fix: do not read from a NULL stream */
    }
    for (i = 0; i <= grid_rows - 1; i++)
        for (j = 0; j <= grid_cols - 1; j++) {
            if (fgets(str, STR_SIZE, fp) == NULL) {
                fatal("not enough lines in file");
                fclose(fp);
                return;  /* stop parsing a truncated file */
            }
            //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
            if ((sscanf(str, "%f", &val) != 1))
                fatal("invalid file format");
            vect[i * grid_cols + j] = val;
        }
    fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
/*
 * Hotspot stencil kernel: advances the temperature grid by `iteration`
 * time steps inside a single launch using the shrinking-pyramid scheme.
 * Expected launch config: blockDim = (BLOCK_SIZE, BLOCK_SIZE); one block
 * per small_block_rows x small_block_cols output tile plus a halo of
 * `iteration` cells on each side.  Uses 3 * BLOCK_SIZE^2 floats of
 * static shared memory.
 * NOTE(review): `time_elapsed` is accepted but never used in the body.
 * NOTE(review): `computed` is only assigned inside the i-loop, so the
 * final global store assumes iteration >= 1 -- confirm callers.
 * NOTE(review): the 80.0 / 2.0 literals are doubles and promote parts of
 * the stencil arithmetic to double precision; kept as-is (Rodinia
 * reference behavior) -- verify before changing.
 */
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed){
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
// out-of-range threads skip the load, leaving their shared cells
// uninitialized; the N/S/W/E clamping below appears to keep all reads
// inside the valid region -- TODO confirm with compute-sanitizer.
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
// barrier: the whole tile must be resident before any neighbor reads
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
// clamp neighbor coordinates so edge threads re-read in-range cells
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
// each pass shrinks the computed region by one halo ring per side
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] =
temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
// last pass: the result stays in temp_t, skip the copy-back
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
/*
 * Run total_iterations simulation time steps in batches of
 * num_iterations per kernel launch, ping-ponging between MatrixTemp[0]
 * and MatrixTemp[1].  Returns the index (0 or 1) of the device buffer
 * holding the final temperatures.
 * NOTE(review): the loop variable t is float while the iteration counts
 * are ints, so MIN(num_iterations, total_iterations-t) yields a float
 * that is implicitly truncated to the kernel's int parameter -- verify
 * this matches the intended batch sizes.
 */
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
// physical constants derived from the chip geometry (hotspot RC model)
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
// buffers are swapped before each launch, so the first launch reads [0]
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
/* Print command-line help to stderr, then terminate with failure status. */
void usage(int argc, char **argv)
{
    fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
    /* per-argument descriptions, emitted in declaration order */
    static const char *details[] = {
        "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n",
        "\t<pyramid_height> - pyramid heigh(positive integer)\n",
        "\t<sim_time> - number of iterations\n",
        "\t<temp_file> - name of the file containing the initial temperature values of each cell\n",
        "\t<power_file> - name of the file containing the dissipated power values of each cell\n",
        "\t<output_file> - name of the output file\n",
    };
    for (size_t k = 0; k < sizeof(details) / sizeof(details[0]); ++k)
        fprintf(stderr, "%s", details[k]);
    exit(1);
}
/* Entry point: report the configured work-group size, then delegate to run(). */
int main(int argc, char** argv)
{
    const int wg = BLOCK_SIZE;
    printf("WG size of kernel = %d X %d\n", wg, wg);
    run(argc, argv);
    return EXIT_SUCCESS;
}
/*
 * Parse the command line, load the temperature and power grids, run the
 * transient-temperature simulation on the GPU, and write the result.
 * Bug fix: FilesavingTemp and FilesavingPower were leaked; they are now
 * freed with MatrixOut.
 * NOTE(review): HIP API return codes are unchecked, and
 * pyramid_height >= BLOCK_SIZE/2 makes smallBlockCol/Row non-positive
 * (division by zero below) -- worth validating upstream.
 */
void run(int argc, char** argv)
{
    int size;
    int grid_rows, grid_cols;
    float *FilesavingTemp, *FilesavingPower, *MatrixOut;
    char *tfile, *pfile, *ofile;
    int total_iterations = 60;
    int pyramid_height = 1; // number of time steps folded into one launch
    if (argc != 7)
        usage(argc, argv);
    /* square grid: rows and cols both come from argv[1] */
    if((grid_rows = atoi(argv[1]))<=0||
       (grid_cols = atoi(argv[1]))<=0||
       (pyramid_height = atoi(argv[2]))<=0||
       (total_iterations = atoi(argv[3]))<=0)
        usage(argc, argv);
    tfile=argv[4];
    pfile=argv[5];
    ofile=argv[6];
    size=grid_rows*grid_cols;
    /* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
    int borderCols = (pyramid_height)*EXPAND_RATE/2;
    int borderRows = (pyramid_height)*EXPAND_RATE/2;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
    int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
    FilesavingTemp = (float *) malloc(size*sizeof(float));
    FilesavingPower = (float *) malloc(size*sizeof(float));
    MatrixOut = (float *) calloc (size, sizeof(float));
    if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
        fatal("unable to allocate memory");
    printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
        pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
    readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
    readinput(FilesavingPower, grid_rows, grid_cols, pfile);
    float *MatrixTemp[2], *MatrixPower;
    hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
    hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
    hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice);
    hipMalloc((void**)&MatrixPower, sizeof(float)*size);
    hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice);
    printf("Start computing the transient temperature\n");
    int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
        total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
    printf("Ending simulation\n");
    hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost);
    writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
    hipFree(MatrixPower);
    hipFree(MatrixTemp[0]);
    hipFree(MatrixTemp[1]);
    free(MatrixOut);
    /* bug fix: these host buffers were previously leaked */
    free(FilesavingTemp);
    free(FilesavingPower);
}
| 3588b5c728ad981c5a4701b8e9921da146fffc3e.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
/*
 * Print an error message to stderr and terminate the process.
 * Bug fix: the original returned to the caller, so "fatal" conditions
 * (failed malloc, truncated input file) left the program running on bad
 * state.  Callers clearly expect termination.
 */
void
fatal(char *s)
{
    fprintf(stderr, "error: %s\n", s);
    exit(1);  /* bug fix: actually be fatal */
}
/*
 * Write a grid_rows x grid_cols matrix to a text file, one
 * "<linear-index>\t<value>\n" record per cell, row-major.
 * Bug fix: on fopen failure the original wrote through a NULL FILE*
 * (undefined behavior); we now return early.  fprintf replaces the
 * sprintf+fputs pair and produces byte-identical output.
 */
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
    int i, j, index = 0;
    FILE *fp;
    if ((fp = fopen(file, "w")) == 0) {
        printf("The file was not opened\n");
        return;  /* bug fix: do not write to a NULL stream */
    }
    for (i = 0; i < grid_rows; i++)
        for (j = 0; j < grid_cols; j++) {
            fprintf(fp, "%d\t%g\n", index, vect[i * grid_cols + j]);
            index++;
        }
    fclose(fp);
}
/*
 * Read grid_rows*grid_cols float values (one per text line) into vect,
 * row-major.
 * Bug fixes: a failed fopen previously fell into fgets(NULL) (undefined
 * behavior) -- now reported and returned from; a short file is detected
 * via the fgets return value before the stale buffer is parsed, and the
 * stream is closed on that path.
 */
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
    int i, j;
    FILE *fp;
    char str[STR_SIZE];
    float val;
    if ((fp = fopen(file, "r")) == 0) {
        printf("The file was not opened\n");
        return;  /* bug fix: do not read from a NULL stream */
    }
    for (i = 0; i <= grid_rows - 1; i++)
        for (j = 0; j <= grid_cols - 1; j++) {
            if (fgets(str, STR_SIZE, fp) == NULL) {
                fatal("not enough lines in file");
                fclose(fp);
                return;  /* stop parsing a truncated file */
            }
            //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
            if ((sscanf(str, "%f", &val) != 1))
                fatal("invalid file format");
            vect[i * grid_cols + j] = val;
        }
    fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
/*
 * Hotspot stencil kernel: advances the temperature grid by `iteration`
 * time steps inside a single launch using the shrinking-pyramid scheme.
 * Expected launch config: blockDim = (BLOCK_SIZE, BLOCK_SIZE); one block
 * per small output tile plus a halo of `iteration` cells per side.
 * Uses 3 * BLOCK_SIZE^2 floats of static shared memory.
 * NOTE(review): `time_elapsed` is accepted but never used.
 * NOTE(review): `computed` is only assigned inside the i-loop, so the
 * final global store assumes iteration >= 1 -- confirm callers.
 * NOTE(review): the 80.0 / 2.0 double literals promote parts of the
 * stencil arithmetic to double precision; kept as-is.
 */
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed){
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
// out-of-range threads skip the load, leaving their shared cells
// uninitialized; the N/S/W/E clamping below appears to keep all reads
// inside the valid region -- TODO confirm with compute-sanitizer.
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
// barrier: the whole tile must be resident before any neighbor reads
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
// clamp neighbor coordinates so edge threads re-read in-range cells
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
// each pass shrinks the computed region by one halo ring per side
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] =
temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
// last pass: the result stays in temp_t, skip the copy-back
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
/*
 * Run total_iterations simulation time steps in batches of
 * num_iterations per kernel launch, ping-ponging between MatrixTemp[0]
 * and MatrixTemp[1].  Returns the index (0 or 1) of the device buffer
 * holding the final temperatures.
 * NOTE(review): the loop variable t is float while the iteration counts
 * are ints, so MIN(num_iterations, total_iterations-t) yields a float
 * implicitly truncated to the kernel's int parameter -- verify intended.
 */
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
// physical constants derived from the chip geometry (hotspot RC model)
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
// buffers are swapped before each launch, so the first launch reads [0]
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
/* Print command-line help to stderr, then terminate with failure status. */
void usage(int argc, char **argv)
{
    fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
    /* per-argument descriptions, emitted in declaration order */
    static const char *details[] = {
        "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n",
        "\t<pyramid_height> - pyramid heigh(positive integer)\n",
        "\t<sim_time> - number of iterations\n",
        "\t<temp_file> - name of the file containing the initial temperature values of each cell\n",
        "\t<power_file> - name of the file containing the dissipated power values of each cell\n",
        "\t<output_file> - name of the output file\n",
    };
    for (size_t k = 0; k < sizeof(details) / sizeof(details[0]); ++k)
        fprintf(stderr, "%s", details[k]);
    exit(1);
}
/* Entry point: report the configured work-group size, then delegate to run(). */
int main(int argc, char** argv)
{
    const int wg = BLOCK_SIZE;
    printf("WG size of kernel = %d X %d\n", wg, wg);
    run(argc, argv);
    return EXIT_SUCCESS;
}
/*
 * Parse the command line, load the temperature and power grids, run the
 * transient-temperature simulation on the GPU, and write the result.
 * Bug fix: FilesavingTemp and FilesavingPower were leaked; they are now
 * freed with MatrixOut.
 * NOTE(review): CUDA API return codes are unchecked, and
 * pyramid_height >= BLOCK_SIZE/2 makes smallBlockCol/Row non-positive
 * (division by zero below) -- worth validating upstream.
 */
void run(int argc, char** argv)
{
    int size;
    int grid_rows, grid_cols;
    float *FilesavingTemp, *FilesavingPower, *MatrixOut;
    char *tfile, *pfile, *ofile;
    int total_iterations = 60;
    int pyramid_height = 1; // number of time steps folded into one launch
    if (argc != 7)
        usage(argc, argv);
    /* square grid: rows and cols both come from argv[1] */
    if((grid_rows = atoi(argv[1]))<=0||
       (grid_cols = atoi(argv[1]))<=0||
       (pyramid_height = atoi(argv[2]))<=0||
       (total_iterations = atoi(argv[3]))<=0)
        usage(argc, argv);
    tfile=argv[4];
    pfile=argv[5];
    ofile=argv[6];
    size=grid_rows*grid_cols;
    /* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
    int borderCols = (pyramid_height)*EXPAND_RATE/2;
    int borderRows = (pyramid_height)*EXPAND_RATE/2;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
    int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
    FilesavingTemp = (float *) malloc(size*sizeof(float));
    FilesavingPower = (float *) malloc(size*sizeof(float));
    MatrixOut = (float *) calloc (size, sizeof(float));
    if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
        fatal("unable to allocate memory");
    printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
        pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
    readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
    readinput(FilesavingPower, grid_rows, grid_cols, pfile);
    float *MatrixTemp[2], *MatrixPower;
    cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
    cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
    cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&MatrixPower, sizeof(float)*size);
    cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice);
    printf("Start computing the transient temperature\n");
    int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
        total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
    printf("Ending simulation\n");
    cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost);
    writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
    cudaFree(MatrixPower);
    cudaFree(MatrixTemp[0]);
    cudaFree(MatrixTemp[1]);
    free(MatrixOut);
    /* bug fix: these host buffers were previously leaked */
    free(FilesavingTemp);
    free(FilesavingPower);
}
|
a97995b8b1c8201d5d88f50666576c7bf082c637.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_reduce.h"
#include "saber/funcs/impl/cuda/vender_reduce.h"
namespace anakin {
namespace saber {
namespace {
// Binary reduction operator, specialized per ReduceType below.  The
// unspecialized primary template returns -1.f as a sentinel for an
// unsupported reduction type.
template <ReduceType type>
class ReOp {
public:
__device__
static float compute(float a, float b) {
return -1.f;
}
};
// max(a, b)
template <>
__device__
float ReOp<Reduce_max>::compute(float a, float b) {
return ((a > b) ? a : b);
}
// min(a, b)
template <>
__device__
float ReOp<Reduce_min>::compute(float a, float b) {
return ((a > b) ? b : a);
}
// sum
template <>
__device__
float ReOp<Reduce_sum>::compute(float a, float b) {
return a + b;
}
// avg accumulates like sum; the caller divides by the reduce size later
template <>
__device__
float ReOp<Reduce_avg>::compute(float a, float b) {
return a + b;
}
// product
template <>
__device__
float ReOp<Reduce_prod>::compute(float a, float b) {
return a * b;
}
// Maps a linear output index back to the corresponding linear input
// index, given row-major strides of both tensors; specialized on the
// tensor rank nDim.  (The primary template's parameter names dims/odims
// are stale -- the specializations take in_stride/out_stride.)
template <int nDim>
class IndexCompute {
public:
__device__
static int input_idx(const int* dims,
const int* odims,
int out_idx);
};
// rank 4: decompose out_idx into 4 coordinates, re-linearize with input strides
template <>
__device__
int IndexCompute<4>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int i1 = (out_idx % out_stride[0]) / out_stride[1];
int i2 = (out_idx % out_stride[1]) / out_stride[2];
int i3 = (out_idx % out_stride[2]) / out_stride[3];
int idx = i0 * in_stride[0]
+ i1 * in_stride[1]
+ i2 * in_stride[2]
+ i3 * in_stride[3];
return idx;
}
// rank 3
template <>
__device__
int IndexCompute<3>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int i1 = (out_idx % out_stride[0]) / out_stride[1];
int i2 = (out_idx % out_stride[1]) / out_stride[2];
int idx = i0 * in_stride[0]
+ i1 * in_stride[1]
+ i2 * in_stride[2];
return idx;
}
// rank 2
template <>
__device__
int IndexCompute<2>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int i1 = (out_idx % out_stride[0]) / out_stride[1];
int idx = i0 * in_stride[0]
+ i1 * in_stride[1];
return idx;
}
// rank 1
template <>
__device__
int IndexCompute<1>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int idx = i0 * in_stride[0];
return idx;
}
// if you are reading this, there are still a lot
// optimize here to do, This class is the right class
// to make parallel reduction.
// the compute function can run inside one block,
// try to use shuffle instruction here.
// int tdim is the threads num of one block.
// Serial per-thread reduction over rDim reduced dimensions: nested
// loops walk the reduced dimensions starting at in_idx, combining
// elements via ReOp<type>.  Each specialization seeds its innermost
// accumulator with element 0 and combines from element 1 onward.
// NOTE: tdim (threads per block) is currently unused -- the reduction
// is fully sequential per thread (see the optimization note above).
template <int rdim, int tdim, ReduceType type>
class ReduceCompute{
public:
__device__
static float compute(
const int* dims,
const int* rdims,
const int* in_stride,
const float* in_data, int in_idx) {
return 0;
}
};
// reduce over one dimension
template <int tdim, ReduceType type>
class ReduceCompute<1, tdim, type> {
public:
__device__
static float compute(
const int* dims,
const int* rdims,
const int* in_stride,
const float *in_data, int in_idx) {
// int tid = threadIdx.x;
float res = in_data[in_idx];
int idx = in_idx + in_stride[rdims[0]];
// here is the reduction op.
for (int i = 1; i < dims[rdims[0]]; ++i) {
res = ReOp<type>::compute(res, in_data[idx]);
idx += in_stride[rdims[0]];
}
return res;
}
};
// reduce over two dimensions (outer loop combines inner-loop results)
template <int tdim, ReduceType type>
class ReduceCompute<2, tdim, type> {
public:
__device__
static float compute(
const int* dims,
const int* rdims,
const int* in_stride,
const float *in_data, int in_idx) {
float res0 = 0.f;
int idx0 = in_idx;
for (int i = 0; i < dims[rdims[0]]; ++i) {
float res1 = in_data[idx0];
int idx1 = idx0 + in_stride[rdims[1]];
for (int j = 1; j < dims[rdims[1]]; ++j) {
res1 = ReOp<type>::compute(res1, in_data[idx1]);
idx1 += in_stride[rdims[1]];
}
idx0 += in_stride[rdims[0]];
if (i == 0) {
res0 = res1;
} else {
res0 = ReOp<type>::compute(res0, res1);
}
}
return res0;
}
};
// reduce over three dimensions
template <int tdim, ReduceType type>
class ReduceCompute<3, tdim, type> {
public:
__device__
static float compute(
const int* dims,
const int* rdims,
const int* in_stride,
const float *in_data, int in_idx) {
float res0 = 0.f;
int idx0 = in_idx;
for (int i = 0; i < dims[rdims[0]]; ++i) {
float res1 = 0.f;
int idx1 = idx0;
for (int j = 0; j < dims[rdims[1]]; ++j) {
float res2 = in_data[idx1];
int idx2 = idx1 + in_stride[rdims[2]];
for (int k = 1; k < dims[rdims[2]]; ++k) {
res2 = ReOp<type>::compute(res2, in_data[idx2]);
idx2 += in_stride[rdims[2]];
}
if (j == 0) {
res1 = res2;
} else {
res1 = ReOp<type>::compute(res1, res2);
}
idx1 += in_stride[rdims[1]];
}
if (i == 0) {
res0 = res1;
} else {
res0 = ReOp<type>::compute(res0, res1);
}
idx0 += in_stride[rdims[0]];
}
return res0;
}
};
// Reduce over four dimensions: nested loops walk the reduced
// dimensions from in_idx, combining elements via ReOp<type>.
// Bug fix: res3 is seeded with in_data[idx2] (element 0 of the
// innermost reduced run), so the inner loop must start at u = 1, as the
// <1>/<2>/<3> specializations do.  Starting at u = 0 combined one extra
// element AND read one element past the end of the innermost run
// (out-of-bounds access, wrong reduction result).
template <int tdim, ReduceType type>
class ReduceCompute<4, tdim, type> {
public:
__device__
static float compute(
        const int* dims,
        const int* rdims,
        const int* in_stride,
        const float *in_data, int in_idx) {
    float res0 = 0.f;
    int idx0 = in_idx;
    for (int i = 0; i < dims[rdims[0]]; ++i) {
        float res1 = 0.f;
        int idx1 = idx0;
        for (int j = 0; j < dims[rdims[1]]; ++j) {
            float res2 = 0.f;
            int idx2 = idx1;
            for (int k = 0; k < dims[rdims[2]]; ++k) {
                float res3 = in_data[idx2];
                int idx3 = idx2 + in_stride[rdims[3]];
                for (int u = 1; u < dims[rdims[3]]; ++u) {  // bug fix: start at 1
                    res3 = ReOp<type>::compute(res3, in_data[idx3]);
                    idx3 += in_stride[rdims[3]];
                }
                if (k == 0) {
                    res2 = res3;
                } else {
                    res2 = ReOp<type>::compute(res2, res3);
                }
                idx2 += in_stride[rdims[2]];
            }
            if (j == 0) {
                res1 = res2;
            } else {
                res1 = ReOp<type>::compute(res1, res2);
            }
            idx1 += in_stride[rdims[1]];
        }
        if (i == 0) {
            res0 = res1;
        } else {
            res0 = ReOp<type>::compute(res0, res1);
        }
        idx0 += in_stride[rdims[0]];
    }
    return res0;
}
};
// Reduction kernel: one thread block per output element (dispatch()
// launches with grid = out_size, block = 1), so each output value is
// produced by a single serial ReduceCompute walk; the tdim machinery is
// not yet exploited for intra-block parallelism.
// rdim/dims/i_stride/o_stride are small device arrays staged by create().
template <typename dtype,
ReduceType type,
int nDim,
int rDim>
__global__ void reduce(
const dtype* src,
dtype* dst,
const int* rdim,
const int* dims,
const int* i_stride,
const int* o_stride, int out_size) {
// total number of input elements folded into each output element
int reduce_size = 1;
for (int i = 0; i < rDim; ++i) {
reduce_size *= dims[rdim[i]];
}
float reduce_size_1 = 1.f / ((float)reduce_size);
int bid = blockIdx.x;
int out_idx = bid;
//init;
int in_idx = IndexCompute<nDim>::input_idx(i_stride, o_stride, out_idx);
float res = ReduceCompute<rDim, CUDA_NUM_THREADS, type>::compute(
dims, rdim, i_stride, src, in_idx);
dst[out_idx] = res;
// avg accumulates like sum inside ReOp; scale by 1/reduce_size here
if (Reduce_avg == type) {
dst[out_idx] *= reduce_size_1;
}
}
// Placeholder kernel used to fill unsupported (nDim, rDim) slots in the
// dispatch table built by REG_REDUCE_TYPE_KERNEL; intentionally a no-op.
__global__
void reduce_unknow(
const float* src,
float* dst,
const int* rdim,
const int* dims,
const int* i_stride,
const int* o_stride, int out_size) {return;}
// Full reduction of a contiguous buffer down to a single scalar,
// performed serially by whichever thread(s) execute this body.
// NOTE(review): every launched thread runs the same loop and writes
// dst[0]; presumably this is launched with a single thread -- verify
// against the dispatch table usage.
template <typename dtype,
ReduceType type,
int nDim,
int rDim>
__global__ void reduce_all(
const dtype* src,
dtype* dst,
const int* rdim,
const int* dims,
const int* i_stride,
const int* o_stride,
int out_size) {
// total element count across all reduced dimensions
int reduce_size = 1;
for (int i = 0; i < rDim; ++i) {
reduce_size *= dims[rdim[i]];
}
float reduce_size_1 = 1.f / ((float)reduce_size);
//init;
float res = src[0];
for (int i = 1; i < reduce_size; ++i) {
res = ReOp<type>::compute(res, src[i]);
}
dst[0] = res;
// avg accumulates like sum; scale by 1/reduce_size here
if (Reduce_avg == type) {
dst[0] *= reduce_size_1;
}
}
}
#define REG_REDUCE_TYPE_KERNEL(REDUCE_TYPE) \
_kernel_direct_map[REDUCE_TYPE] = { \
{reduce_unknow}, \
{reduce_unknow, \
reduce_all<float, REDUCE_TYPE, 1, 1>}, \
{reduce_unknow, \
reduce<float, REDUCE_TYPE, 2, 1>, \
reduce_all<float, REDUCE_TYPE, 2, 2>}, \
{reduce_unknow, \
reduce<float, REDUCE_TYPE, 3, 1>, \
reduce<float, REDUCE_TYPE, 3, 2>, \
reduce_all<float, REDUCE_TYPE, 3, 3>}, \
{reduce_unknow, \
reduce<float, REDUCE_TYPE, 4, 1>, \
reduce<float, REDUCE_TYPE, 4, 2>, \
reduce<float, REDUCE_TYPE, 4, 3>, \
reduce_all<float, REDUCE_TYPE, 4, 4>}}
// (Re)allocate `buffer` to hold `size` elements of dtype and enqueue an
// asynchronous host-to-device copy of `data` on `stream`.
// NOTE(review): the hipMemcpyAsync status is ignored, and `data` must
// remain valid until the stream completes -- some callers in create()
// pass function-local vectors, which is worth verifying.
template <typename dtype>
void async_copy_to_buffer(Buffer<NV> &buffer,
dtype* data, unsigned long size, hipStream_t stream) {
buffer.re_alloc(size * sizeof(dtype));
hipMemcpyAsync(buffer.get_data_mutable(), data,
size * sizeof(dtype), hipMemcpyHostToDevice, stream);
}
// Stage the reduce-dim / shape / stride metadata into device-side
// buffers for the template reduction path, or defer to the vendor
// implementation's create().
// NOTE(review): `ndim` is built but never used.
// NOTE(review): i_stride/o_stride are function-local vectors handed to
// an async copy; they may be destroyed before the copy completes unless
// the stream is synchronized elsewhere -- TODO confirm.
template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::create(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ReduceParam<NV>& param, Context<NV>& ctx) {
this->_ctx = &ctx;
if (_template_reduction) {
auto stream = _ctx->get_compute_stream();
auto i_stride = inputs[0]->get_stride();
auto o_stride = outputs[0]->get_stride();
std::vector<int> ndim(inputs[0]->valid_shape());
async_copy_to_buffer<int>(_rdim_b,
param.reduce_dim.data(),
param.reduce_dim.size(), stream);
async_copy_to_buffer<int>(_ndim_b,
inputs[0]->valid_shape().data(),
inputs[0]->valid_shape().size(), stream);
async_copy_to_buffer<int>(_i_stride_b,
i_stride.data(), i_stride.size(), stream);
async_copy_to_buffer<int>(_o_stride_b,
o_stride.data(), o_stride.size(), stream);
return SaberSuccess;
} else {
return _impl->create(inputs, outputs, param, ctx);
}
}
// Build the (reduce type x tensor rank x reduce rank) kernel dispatch
// table for the template path, or construct the vendor fallback; always
// finishes by calling create() to stage metadata.
// NOTE(review): `_impl` is allocated with new and not guarded against a
// second init() call -- potential leak if init() is re-entered.
template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::init(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ReduceParam<NV>& param, Context<NV>& ctx) {
this->_ctx = &ctx;
if (_template_reduction) {
REG_REDUCE_TYPE_KERNEL(Reduce_avg);
REG_REDUCE_TYPE_KERNEL(Reduce_min);
REG_REDUCE_TYPE_KERNEL(Reduce_max);
REG_REDUCE_TYPE_KERNEL(Reduce_sum);
REG_REDUCE_TYPE_KERNEL(Reduce_prod);
} else {
_impl = new VenderReduce<NV, AK_FLOAT>;
_impl->init(inputs, outputs, param, ctx);
}
return create(inputs, outputs, param, ctx);
}
// Launch the kernel selected by (reduce_type, input rank, #reduce dims)
// with one single-thread block per output element on the context's
// compute stream, or defer to the vendor implementation.
// NOTE(review): the "<< <" / ">> >" spelling below is how hipify left
// the CUDA <<<...>>> launch syntax -- confirm it tokenizes as a kernel
// launch for the target toolchain.
template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::dispatch(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ReduceParam<NV>& param) {
if (_template_reduction) {
int out_size = outputs[0]->valid_size();
_kernel_direct_map[param.reduce_type]
[inputs[0]->dims()]
[param.reduce_dim.size()] << < out_size, 1,
0, _ctx->get_compute_stream() >> > (
(const float *) inputs[0]->data(),
(float *) outputs[0]->mutable_data(),
(const int *) _rdim_b.get_data(),
(const int *) _ndim_b.get_data(),
(const int *) _i_stride_b.get_data(),
(const int *) _o_stride_b.get_data(),
outputs[0]->valid_size());
return SaberSuccess;
} else {
return _impl->dispatch(inputs, outputs, param);
}
}
template class SaberReduce<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberReduce, ReduceParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberReduce, ReduceParam, NV, AK_INT8);
} // namespace saber.
} // namespace anakin.
| a97995b8b1c8201d5d88f50666576c7bf082c637.cu |
#include "saber/funcs/impl/cuda/saber_reduce.h"
#include "saber/funcs/impl/cuda/vender_reduce.h"
namespace anakin {
namespace saber {
namespace {
// Binary reduction operator, specialized per ReduceType below; the
// primary template's -1.f is a sentinel for unsupported types.
// (avg accumulates like sum; the caller divides by the reduce size.)
template <ReduceType type>
class ReOp {
public:
__device__
static float compute(float a, float b) {
return -1.f;
}
};
template <>
__device__
float ReOp<Reduce_max>::compute(float a, float b) {
return ((a > b) ? a : b);
}
template <>
__device__
float ReOp<Reduce_min>::compute(float a, float b) {
return ((a > b) ? b : a);
}
template <>
__device__
float ReOp<Reduce_sum>::compute(float a, float b) {
return a + b;
}
template <>
__device__
float ReOp<Reduce_avg>::compute(float a, float b) {
return a + b;
}
template <>
__device__
float ReOp<Reduce_prod>::compute(float a, float b) {
return a * b;
}
// Maps a linear output index to the corresponding linear input index,
// given row-major strides of both tensors; specialized on rank nDim.
// (The primary template's dims/odims parameter names are stale -- the
// specializations take in_stride/out_stride.)
template <int nDim>
class IndexCompute {
public:
__device__
static int input_idx(const int* dims,
const int* odims,
int out_idx);
};
template <>
__device__
int IndexCompute<4>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int i1 = (out_idx % out_stride[0]) / out_stride[1];
int i2 = (out_idx % out_stride[1]) / out_stride[2];
int i3 = (out_idx % out_stride[2]) / out_stride[3];
int idx = i0 * in_stride[0]
+ i1 * in_stride[1]
+ i2 * in_stride[2]
+ i3 * in_stride[3];
return idx;
}
template <>
__device__
int IndexCompute<3>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int i1 = (out_idx % out_stride[0]) / out_stride[1];
int i2 = (out_idx % out_stride[1]) / out_stride[2];
int idx = i0 * in_stride[0]
+ i1 * in_stride[1]
+ i2 * in_stride[2];
return idx;
}
template <>
__device__
int IndexCompute<2>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int i1 = (out_idx % out_stride[0]) / out_stride[1];
int idx = i0 * in_stride[0]
+ i1 * in_stride[1];
return idx;
}
template <>
__device__
int IndexCompute<1>::input_idx(
const int* in_stride,
const int* out_stride,
int out_idx) {
int i0 = out_idx / out_stride[0];
int idx = i0 * in_stride[0];
return idx;
}
// if you are reading this, there are still a lot
// optimize here to do, This class is the right class
// to make parallel reduction.
// the compute function can run inside one block,
// try to use shuffle instruction here.
// int tdim is the threads num of one block.
// Primary template: folds `rdim` reduced axes of the input tensor with
// ReOp<type>.  This fallback body is never meant to be selected (only
// the rdim = 1..4 specializations below are registered in the kernel
// dispatch table); it returns 0 as a dummy value.
template <int rdim, int tdim, ReduceType type>
class ReduceCompute{
public:
    __device__
    static float compute(
            const int* dims,
            const int* rdims,
            const int* in_stride,
            const float* in_data, int in_idx) {
        return 0;
    }
};
template <int tdim, ReduceType type>
class ReduceCompute<1, tdim, type> {
public:
    // Reduce a single axis: starting at in_idx, walk dims[rdims[0]]
    // elements spaced by the axis stride and fold them with ReOp<type>.
    __device__
    static float compute(
            const int* dims,
            const int* rdims,
            const int* in_stride,
            const float *in_data, int in_idx) {
        const int axis   = rdims[0];
        const int step   = in_stride[axis];
        const int extent = dims[axis];
        // Seed with the first element so no ReOp identity is needed.
        float acc = in_data[in_idx];
        int cursor = in_idx + step;
        for (int n = 1; n < extent; ++n, cursor += step) {
            acc = ReOp<type>::compute(acc, in_data[cursor]);
        }
        return acc;
    }
};
// Two reduced axes: the outer loop walks rdims[0]; the inner loop folds
// rdims[1] with ReOp<type>; inner results are then folded together.
template <int tdim, ReduceType type>
class ReduceCompute<2, tdim, type> {
public:
    __device__
    static float compute(
            const int* dims,
            const int* rdims,
            const int* in_stride,
            const float *in_data, int in_idx) {
        float res0 = 0.f;
        int idx0 = in_idx;
        for (int i = 0; i < dims[rdims[0]]; ++i) {
            // Seed the inner fold with the axis' first element, then
            // combine the remaining dims[rdims[1]] - 1 elements.
            float res1 = in_data[idx0];
            int idx1 = idx0 + in_stride[rdims[1]];
            for (int j = 1; j < dims[rdims[1]]; ++j) {
                res1 = ReOp<type>::compute(res1, in_data[idx1]);
                idx1 += in_stride[rdims[1]];
            }
            idx0 += in_stride[rdims[0]];
            // First pass initializes the accumulator (avoids needing a
            // ReOp identity element); later passes fold into it.
            if (i == 0) {
                res0 = res1;
            } else {
                res0 = ReOp<type>::compute(res0, res1);
            }
        }
        return res0;
    }
};
// Three reduced axes: nested folds over rdims[0] / rdims[1] / rdims[2].
// The innermost loop is seeded with the first element of its axis and
// starts at k = 1; each outer level folds its children's partials.
template <int tdim, ReduceType type>
class ReduceCompute<3, tdim, type> {
public:
    __device__
    static float compute(
            const int* dims,
            const int* rdims,
            const int* in_stride,
            const float *in_data, int in_idx) {
        float res0 = 0.f;
        int idx0 = in_idx;
        for (int i = 0; i < dims[rdims[0]]; ++i) {
            float res1 = 0.f;
            int idx1 = idx0;
            for (int j = 0; j < dims[rdims[1]]; ++j) {
                // Seed with element 0 of the innermost axis.
                float res2 = in_data[idx1];
                int idx2 = idx1 + in_stride[rdims[2]];
                for (int k = 1; k < dims[rdims[2]]; ++k) {
                    res2 = ReOp<type>::compute(res2, in_data[idx2]);
                    idx2 += in_stride[rdims[2]];
                }
                if (j == 0) {
                    res1 = res2;
                } else {
                    res1 = ReOp<type>::compute(res1, res2);
                }
                idx1 += in_stride[rdims[1]];
            }
            if (i == 0) {
                res0 = res1;
            } else {
                res0 = ReOp<type>::compute(res0, res1);
            }
            idx0 += in_stride[rdims[0]];
        }
        return res0;
    }
};
// Four reduced axes: nested folds over rdims[0..3].
// Fix: the innermost loop started at u = 0 even though res3 is already
// seeded with in_data[idx2] and idx3 points one stride past it, so the
// fold consumed dims[rdims[3]] + 1 elements -- one read past the end of
// the axis and a double-counted tail.  It now starts at u = 1, matching
// the rank-1/2/3 specializations.
template <int tdim, ReduceType type>
class ReduceCompute<4, tdim, type> {
public:
    __device__
    static float compute(
            const int* dims,
            const int* rdims,
            const int* in_stride,
            const float *in_data, int in_idx) {
        float res0 = 0.f;
        int idx0 = in_idx;
        for (int i = 0; i < dims[rdims[0]]; ++i) {
            float res1 = 0.f;
            int idx1 = idx0;
            for (int j = 0; j < dims[rdims[1]]; ++j) {
                float res2 = 0.f;
                int idx2 = idx1;
                for (int k = 0; k < dims[rdims[2]]; ++k) {
                    // Seed with element 0 of the innermost axis, then
                    // fold the remaining dims[rdims[3]] - 1 elements.
                    float res3 = in_data[idx2];
                    int idx3 = idx2 + in_stride[rdims[3]];
                    for (int u = 1; u < dims[rdims[3]]; ++u) {
                        res3 = ReOp<type>::compute(res3, in_data[idx3]);
                        idx3 += in_stride[rdims[3]];
                    }
                    if (k == 0) {
                        res2 = res3;
                    } else {
                        res2 = ReOp<type>::compute(res2, res3);
                    }
                    idx2 += in_stride[rdims[2]];
                }
                if (j == 0) {
                    res1 = res2;
                } else {
                    res1 = ReOp<type>::compute(res1, res2);
                }
                idx1 += in_stride[rdims[1]];
            }
            if (i == 0) {
                res0 = res1;
            } else {
                res0 = ReOp<type>::compute(res0, res1);
            }
            idx0 += in_stride[rdims[0]];
        }
        return res0;
    }
};
// Partial-reduction kernel: one output element per block (dispatch
// launches out_size blocks of 1 thread); each block locates its input
// window via IndexCompute and folds it with ReduceCompute/ReOp<type>.
template <typename dtype,
        ReduceType type,
        int nDim,
        int rDim>
__global__ void reduce(
        const dtype* src,
        dtype* dst,
        const int* rdim,
        const int* dims,
        const int* i_stride,
        const int* o_stride, int out_size) {
    // Number of input elements folded into each output element
    // (product of the reduced axes' extents).
    int reduce_size = 1;
    for (int i = 0; i < rDim; ++i) {
        reduce_size *= dims[rdim[i]];
    }
    float reduce_size_1 = 1.f / ((float)reduce_size);
    int bid = blockIdx.x;
    int out_idx = bid;
    //init;
    int in_idx = IndexCompute<nDim>::input_idx(i_stride, o_stride, out_idx);
    float res = ReduceCompute<rDim, CUDA_NUM_THREADS, type>::compute(
            dims, rdim, i_stride, src, in_idx);
    dst[out_idx] = res;
    // Averages are sums scaled by 1/reduce_size.
    if (Reduce_avg == type) {
        dst[out_idx] *= reduce_size_1;
    }
}
// Dummy kernel used to pad the dispatch table for unsupported
// (rank, reduce-dim-count) combinations; deliberately does nothing.
__global__
void reduce_unknow(
        const float* src,
        float* dst,
        const int* rdim,
        const int* dims,
        const int* i_stride,
        const int* o_stride, int out_size) {
}
// Full reduction: folds src[0 .. reduce_size) sequentially into dst[0].
// Every launched thread performs the identical fold; presumably this is
// only launched with out_size == 1 (one block, one thread) -- confirm
// against the dispatch table usage.
template <typename dtype,
        ReduceType type,
        int nDim,
        int rDim>
__global__ void reduce_all(
        const dtype* src,
        dtype* dst,
        const int* rdim,
        const int* dims,
        const int* i_stride,
        const int* o_stride,
        int out_size) {
    int reduce_size = 1;
    for (int i = 0; i < rDim; ++i) {
        reduce_size *= dims[rdim[i]];
    }
    float reduce_size_1 = 1.f / ((float)reduce_size);
    //init;
    // Assumes src is contiguous over reduce_size elements.
    float res = src[0];
    for (int i = 1; i < reduce_size; ++i) {
        res = ReOp<type>::compute(res, src[i]);
    }
    dst[0] = res;
    if (Reduce_avg == type) {
        dst[0] *= reduce_size_1;
    }
}
}
// Fills _kernel_direct_map[REDUCE_TYPE] with the kernel table indexed
// as [tensor rank][number of reduced dims].  Index 0 and impossible
// combinations map to the reduce_unknow dummy; reducing every axis of
// a rank-n tensor uses reduce_all.
#define REG_REDUCE_TYPE_KERNEL(REDUCE_TYPE) \
    _kernel_direct_map[REDUCE_TYPE] = { \
            {reduce_unknow}, \
            {reduce_unknow, \
            reduce_all<float, REDUCE_TYPE, 1, 1>}, \
            {reduce_unknow, \
            reduce<float, REDUCE_TYPE, 2, 1>, \
            reduce_all<float, REDUCE_TYPE, 2, 2>}, \
            {reduce_unknow, \
            reduce<float, REDUCE_TYPE, 3, 1>, \
            reduce<float, REDUCE_TYPE, 3, 2>, \
            reduce_all<float, REDUCE_TYPE, 3, 3>}, \
            {reduce_unknow, \
            reduce<float, REDUCE_TYPE, 4, 1>, \
            reduce<float, REDUCE_TYPE, 4, 2>, \
            reduce<float, REDUCE_TYPE, 4, 3>, \
            reduce_all<float, REDUCE_TYPE, 4, 4>}}
// Reallocates `buffer` to hold `size` elements of dtype and enqueues an
// asynchronous host->device copy of `data` on `stream`.  The caller
// must keep `data` alive until the stream has consumed the copy.
// Fix: this helper was missed by hipify -- it still used the CUDA
// runtime API (cudaStream_t / cudaMemcpyAsync) in an otherwise fully
// hipified file, which does not build on AMD; switched to the HIP
// equivalents (identical behavior on the NVIDIA backend).
template <typename dtype>
void async_copy_to_buffer(Buffer<NV> &buffer,
        dtype* data, unsigned long size, hipStream_t stream) {
    buffer.re_alloc(size * sizeof(dtype));
    hipMemcpyAsync(buffer.get_data_mutable(), data,
            size * sizeof(dtype), hipMemcpyHostToDevice, stream);
}
// create(): stages the reduce-dim list, the input shape, and both
// stride tables into device-side buffers (template path), or defers to
// the vendor implementation.
template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::create(
        const std::vector<Tensor<NV>*>& inputs,
        std::vector<Tensor<NV>*>& outputs,
        ReduceParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    if (_template_reduction) {
        auto stream = _ctx->get_compute_stream();
        auto i_stride = inputs[0]->get_stride();
        auto o_stride = outputs[0]->get_stride();
        // NOTE(review): `ndim` is built but never used -- dead local?
        std::vector<int> ndim(inputs[0]->valid_shape());
        async_copy_to_buffer<int>(_rdim_b,
                param.reduce_dim.data(),
                param.reduce_dim.size(), stream);
        async_copy_to_buffer<int>(_ndim_b,
                inputs[0]->valid_shape().data(),
                inputs[0]->valid_shape().size(), stream);
        async_copy_to_buffer<int>(_i_stride_b,
                i_stride.data(), i_stride.size(), stream);
        async_copy_to_buffer<int>(_o_stride_b,
                o_stride.data(), o_stride.size(), stream);
        return SaberSuccess;
    } else {
        return _impl->create(inputs, outputs, param, ctx);
    }
}
// init(): registers the per-reduce-type kernel dispatch tables
// (template path) or instantiates the vendor implementation, then
// finishes setup via create().
template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::init(
        const std::vector<Tensor<NV>*>& inputs,
        std::vector<Tensor<NV>*>& outputs,
        ReduceParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    if (_template_reduction) {
        REG_REDUCE_TYPE_KERNEL(Reduce_avg);
        REG_REDUCE_TYPE_KERNEL(Reduce_min);
        REG_REDUCE_TYPE_KERNEL(Reduce_max);
        REG_REDUCE_TYPE_KERNEL(Reduce_sum);
        REG_REDUCE_TYPE_KERNEL(Reduce_prod);
    } else {
        // NOTE(review): calling init() twice would leak the previous
        // _impl -- confirm init is invoked exactly once per instance.
        _impl = new VenderReduce<NV, AK_FLOAT>;
        _impl->init(inputs, outputs, param, ctx);
    }
    return create(inputs, outputs, param, ctx);
}
// dispatch(): looks up the kernel by [reduce type][input rank][number
// of reduced dims] and launches it with one single-thread block per
// output element, on the context's compute stream.
template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::dispatch(
        const std::vector<Tensor<NV>*>& inputs,
        std::vector<Tensor<NV>*>& outputs,
        ReduceParam<NV>& param) {
    if (_template_reduction) {
        int out_size = outputs[0]->valid_size();
        _kernel_direct_map[param.reduce_type]
        [inputs[0]->dims()]
        [param.reduce_dim.size()] << < out_size, 1,
                0, _ctx->get_compute_stream() >> > (
                (const float *) inputs[0]->data(),
                (float *) outputs[0]->mutable_data(),
                (const int *) _rdim_b.get_data(),
                (const int *) _ndim_b.get_data(),
                (const int *) _i_stride_b.get_data(),
                (const int *) _o_stride_b.get_data(),
                outputs[0]->valid_size());
        return SaberSuccess;
    } else {
        return _impl->dispatch(inputs, outputs, param);
    }
}
template class SaberReduce<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberReduce, ReduceParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberReduce, ReduceParam, NV, AK_INT8);
} // namespace saber.
} // namespace anakin.
|
b3c4f9b5fea22c31886a6693e1bd1e592485154f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
// deviceQuery (HIP port): enumerates devices, selects device 0, and
// prints its driver/runtime versions, memory sizes, and hardware
// limits.  Exits with failure if device enumeration fails.
int main(int argc, char **argv) {
    printf("%s Starting... \n", argv[0]);
    int deviceCount = 0;
    hipError_t error_id = hipGetDeviceCount(&deviceCount);
    if (error_id != hipSuccess) {
        printf("hipGetDeviceCount return %d\n -> %s\n", (int)error_id, hipGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    if (deviceCount == 0) {
        printf("There are no available device(s) that support CUDA\n");
    } else {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }
    int dev, driverVersion = 0, runtimeVersion = 0;
    dev = 0;
    hipSetDevice(dev);
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    printf("Device %d: \"%s\"\n", dev, deviceProp.name);
    hipDriverGetVersion(&driverVersion);
    hipRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
        driverVersion / 1000, (driverVersion % 100) / 10,
        runtimeVersion / 1000, (runtimeVersion % 100) / 10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
        deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu bytes)\n",
        (float)deviceProp.totalGlobalMem/(pow(1024.0, 3)),
        (unsigned long long) deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%.2f GHz)\n",
        deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f MHz\n",
        deviceProp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n",
        deviceProp.memoryBusWidth);
    if (deviceProp.l2CacheSize) {
        printf(" L2 Cache Size: %d bytes\n",
            deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
        deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
        deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d, %d) x %d\n",
        deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
        deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
    printf(" Total amount of const memory: %lu bytes\n",
        deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n",
        deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n",
        deviceProp.regsPerBlock);
    printf(" Warp size: %d\n",
        deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
        deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n",
        deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
        deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
        deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n",
        deviceProp.memPitch);
    exit(EXIT_SUCCESS);
} | b3c4f9b5fea22c31886a6693e1bd1e592485154f.cu | #include <cuda_runtime.h>
#include <stdio.h>
// deviceQuery (CUDA original): enumerates devices, selects device 0,
// and prints its driver/runtime versions, memory sizes, and hardware
// limits.  Exits with failure if device enumeration fails.
int main(int argc, char **argv) {
    printf("%s Starting... \n", argv[0]);
    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess) {
        printf("cudaGetDeviceCount return %d\n -> %s\n", (int)error_id, cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    if (deviceCount == 0) {
        printf("There are no available device(s) that support CUDA\n");
    } else {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }
    int dev, driverVersion = 0, runtimeVersion = 0;
    dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Device %d: \"%s\"\n", dev, deviceProp.name);
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
        driverVersion / 1000, (driverVersion % 100) / 10,
        runtimeVersion / 1000, (runtimeVersion % 100) / 10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
        deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu bytes)\n",
        (float)deviceProp.totalGlobalMem/(pow(1024.0, 3)),
        (unsigned long long) deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%.2f GHz)\n",
        deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f MHz\n",
        deviceProp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n",
        deviceProp.memoryBusWidth);
    if (deviceProp.l2CacheSize) {
        printf(" L2 Cache Size: %d bytes\n",
            deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
        deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
        deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d, %d) x %d\n",
        deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
        deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
    printf(" Total amount of const memory: %lu bytes\n",
        deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n",
        deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n",
        deviceProp.regsPerBlock);
    printf(" Warp size: %d\n",
        deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
        deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n",
        deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
        deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
        deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n",
        deviceProp.memPitch);
    exit(EXIT_SUCCESS);
} |
46cc9159286492c3a2a25f5b4b91008d6fa0b1a0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <thrust/scan.h>
#include <error.h>
// Counts, per vertex, how many triangles reference it (three atomic
// increments per triangle).  nbcount must be zero-initialized.
// Fix: the grid-stride loop started at threadIdx.x (no block offset),
// so every block processed the same triangles and counts were inflated
// by gridDim.x; the start index now includes the block offset.
template<typename IndexType, typename ValueType>
void __global__ neighbor_count_kernel(IndexType* tri0, IndexType* tri1, IndexType* tri2, IndexType ne, IndexType* nbcount)
{
    for(int eidx = blockIdx.x * blockDim.x + threadIdx.x; eidx < ne; eidx += gridDim.x * blockDim.x)
    {
        IndexType i = tri0[eidx];
        IndexType j = tri1[eidx];
        IndexType k = tri2[eidx];
        // NOTE(review): the unsigned* cast assumes IndexType is a
        // 32-bit integer type -- confirm at the call sites.
        atomicInc((unsigned *)nbcount + i, INT_MAX);
        atomicInc((unsigned *)nbcount + j, INT_MAX);
        atomicInc((unsigned *)nbcount + k, INT_MAX);
    }
}
// For each vertex, clears its (pitched, column-major) row of
// column_indices to -1 and records the tri0 vertex of each incident
// element as a neighbour candidate.
// Fix: the loop start omitted the block offset (it began at
// threadIdx.x), so with more than one block the same vertices were
// processed repeatedly; the global thread id is now used.
template<typename IndexType, typename ValueType>
void __global__ compute_nb_indices_kernel(IndexType* rowoffsets, IndexType* ele_indices, IndexType *tri0, IndexType* tri1, IndexType* tri2, IndexType nv, IndexType* column_indices, size_t num_cols, size_t pitch)
{
    for(int nidx = blockIdx.x * blockDim.x + threadIdx.x; nidx < nv; nidx += gridDim.x * blockDim.x)
    {
        for(int i = 0; i < num_cols; i++)
        {
            column_indices[pitch * i + nidx] = -1;
        }
        int nedges = 0;
        for(int j = rowoffsets[nidx]; j < rowoffsets[nidx + 1]; j++)
        {
            IndexType jj = ele_indices[j];
            IndexType node0 = tri0[jj];
            IndexType node1 = tri1[jj];
            IndexType node2 = tri2[jj];
            // NOTE(review): node1/node2 are read but never stored, and
            // duplicates of node0 are not filtered -- this looks
            // unfinished; confirm the intended neighbour semantics.
            if(node0 != nidx)
            {
                column_indices[pitch * nedges + nidx] = node0;
                nedges++;
            }
        }
    }
}
// For each triangle, appends its index into the incident-element list
// of each of its three vertices.  ele_indices must be pre-filled with
// -1; a free slot inside the vertex's [rowoffsets[v], rowoffsets[v+1])
// range is claimed with atomicCAS so concurrent triangles don't clash.
// Fixes: (1) the loop start omitted the block offset, so every block
// redundantly raced over the same triangles; (2) each scan executed a
// single CAS and then broke out regardless of success, losing the
// element whenever slot 0 was already taken (and the third scan had no
// break at all, stamping every free slot).  Each scan now advances
// until a slot is actually won.
template<typename IndexType, typename ValueType>
void __global__ compute_ele_indices_kernel(IndexType* tri0, IndexType* tri1, IndexType* tri2, IndexType ne, IndexType* rowoffsets, IndexType* ele_indices)
{
    for(int eidx = blockIdx.x * blockDim.x + threadIdx.x; eidx < ne; eidx += gridDim.x * blockDim.x)
    {
        IndexType i = tri0[eidx];
        IndexType j = tri1[eidx];
        IndexType k = tri2[eidx];
        IndexType starti = rowoffsets[i];
        IndexType startj = rowoffsets[j];
        IndexType startk = rowoffsets[k];
        IndexType endi = rowoffsets[i + 1];
        IndexType endj = rowoffsets[j + 1];
        IndexType endk = rowoffsets[k + 1];
        for(int n = starti; n < endi; n++)
        {
            if(atomicCAS(ele_indices + n, (IndexType)-1, (IndexType)eidx) == (IndexType)-1)
                break;
        }
        for(int n = startj; n < endj; n++)
        {
            if(atomicCAS(ele_indices + n, (IndexType)-1, (IndexType)eidx) == (IndexType)-1)
                break;
        }
        for(int n = startk; n < endk; n++)
        {
            if(atomicCAS(ele_indices + n, (IndexType)-1, (IndexType)eidx) == (IndexType)-1)
                break;
        }
    }
}
// Expands a symmetric matrix stored as CSR rows (NOTE(review): the
// start2 = start1*2 - ridx offset arithmetic assumes each row's first
// stored entry is its diagonal -- confirm) into full COO by mirroring
// every off-diagonal entry.
// Fix: the loop advanced by ridx++ instead of the grid stride, so each
// thread redundantly re-processed every row after its own starting
// point; it now uses the standard grid-stride step.
template<typename IndexType, typename ValueType>
__global__ void convert_kernel(IndexType* rowoff1, IndexType* colidx1, ValueType* values1, IndexType* rowidx2, IndexType* colidx2, ValueType* values2, int num_rows)
{
    for(int ridx = blockIdx.x * blockDim.x + threadIdx.x; ridx < num_rows; ridx += gridDim.x * blockDim.x)
    {
        IndexType start1 = rowoff1[ridx];
        IndexType end1 = rowoff1[ridx + 1];
        IndexType start2 = start1 * 2 - ridx;
        rowidx2[start2] = ridx;
        colidx2[start2] = ridx;
        values2[start2] = values1[start1];
        for(int i = start1 + 1; i < end1; i++)
        {
            ValueType v = values1[i];
            IndexType col = colidx1[i];
            IndexType loc = start2 + 1 + 2 * (i - start1 - 1);
            rowidx2[loc] = ridx;
            colidx2[loc] = col;
            values2[loc] = v;
            rowidx2[loc + 1] = col;
            colidx2[loc + 1] = ridx;
            values2[loc + 1] = v;
        }
    }
}
| 46cc9159286492c3a2a25f5b4b91008d6fa0b1a0.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include <thrust/scan.h>
#include <error.h>
// Counts, per vertex, how many triangles reference it (three atomic
// increments per triangle).  nbcount must be zero-initialized.
// Fix: the grid-stride loop started at threadIdx.x (no block offset),
// so every block processed the same triangles and counts were inflated
// by gridDim.x; the start index now includes the block offset.
template<typename IndexType, typename ValueType>
void __global__ neighbor_count_kernel(IndexType* tri0, IndexType* tri1, IndexType* tri2, IndexType ne, IndexType* nbcount)
{
    for(int eidx = blockIdx.x * blockDim.x + threadIdx.x; eidx < ne; eidx += gridDim.x * blockDim.x)
    {
        IndexType i = tri0[eidx];
        IndexType j = tri1[eidx];
        IndexType k = tri2[eidx];
        // NOTE(review): the unsigned* cast assumes IndexType is a
        // 32-bit integer type -- confirm at the call sites.
        atomicInc((unsigned *)nbcount + i, INT_MAX);
        atomicInc((unsigned *)nbcount + j, INT_MAX);
        atomicInc((unsigned *)nbcount + k, INT_MAX);
    }
}
// For each vertex, clears its (pitched, column-major) row of
// column_indices to -1 and records the tri0 vertex of each incident
// element as a neighbour candidate.
// Fix: the loop start omitted the block offset (it began at
// threadIdx.x), so with more than one block the same vertices were
// processed repeatedly; the global thread id is now used.
template<typename IndexType, typename ValueType>
void __global__ compute_nb_indices_kernel(IndexType* rowoffsets, IndexType* ele_indices, IndexType *tri0, IndexType* tri1, IndexType* tri2, IndexType nv, IndexType* column_indices, size_t num_cols, size_t pitch)
{
    for(int nidx = blockIdx.x * blockDim.x + threadIdx.x; nidx < nv; nidx += gridDim.x * blockDim.x)
    {
        for(int i = 0; i < num_cols; i++)
        {
            column_indices[pitch * i + nidx] = -1;
        }
        int nedges = 0;
        for(int j = rowoffsets[nidx]; j < rowoffsets[nidx + 1]; j++)
        {
            IndexType jj = ele_indices[j];
            IndexType node0 = tri0[jj];
            IndexType node1 = tri1[jj];
            IndexType node2 = tri2[jj];
            // NOTE(review): node1/node2 are read but never stored, and
            // duplicates of node0 are not filtered -- this looks
            // unfinished; confirm the intended neighbour semantics.
            if(node0 != nidx)
            {
                column_indices[pitch * nedges + nidx] = node0;
                nedges++;
            }
        }
    }
}
// For each triangle, appends its index into the incident-element list
// of each of its three vertices.  ele_indices must be pre-filled with
// -1; a free slot inside the vertex's [rowoffsets[v], rowoffsets[v+1])
// range is claimed with atomicCAS so concurrent triangles don't clash.
// Fixes: (1) the loop start omitted the block offset, so every block
// redundantly raced over the same triangles; (2) each scan executed a
// single CAS and then broke out regardless of success, losing the
// element whenever slot 0 was already taken (and the third scan had no
// break at all, stamping every free slot).  Each scan now advances
// until a slot is actually won.
template<typename IndexType, typename ValueType>
void __global__ compute_ele_indices_kernel(IndexType* tri0, IndexType* tri1, IndexType* tri2, IndexType ne, IndexType* rowoffsets, IndexType* ele_indices)
{
    for(int eidx = blockIdx.x * blockDim.x + threadIdx.x; eidx < ne; eidx += gridDim.x * blockDim.x)
    {
        IndexType i = tri0[eidx];
        IndexType j = tri1[eidx];
        IndexType k = tri2[eidx];
        IndexType starti = rowoffsets[i];
        IndexType startj = rowoffsets[j];
        IndexType startk = rowoffsets[k];
        IndexType endi = rowoffsets[i + 1];
        IndexType endj = rowoffsets[j + 1];
        IndexType endk = rowoffsets[k + 1];
        for(int n = starti; n < endi; n++)
        {
            if(atomicCAS(ele_indices + n, (IndexType)-1, (IndexType)eidx) == (IndexType)-1)
                break;
        }
        for(int n = startj; n < endj; n++)
        {
            if(atomicCAS(ele_indices + n, (IndexType)-1, (IndexType)eidx) == (IndexType)-1)
                break;
        }
        for(int n = startk; n < endk; n++)
        {
            if(atomicCAS(ele_indices + n, (IndexType)-1, (IndexType)eidx) == (IndexType)-1)
                break;
        }
    }
}
// Expands a symmetric matrix stored as CSR rows (NOTE(review): the
// start2 = start1*2 - ridx offset arithmetic assumes each row's first
// stored entry is its diagonal -- confirm) into full COO by mirroring
// every off-diagonal entry.
// Fix: the loop advanced by ridx++ instead of the grid stride, so each
// thread redundantly re-processed every row after its own starting
// point; it now uses the standard grid-stride step.
template<typename IndexType, typename ValueType>
__global__ void convert_kernel(IndexType* rowoff1, IndexType* colidx1, ValueType* values1, IndexType* rowidx2, IndexType* colidx2, ValueType* values2, int num_rows)
{
    for(int ridx = blockIdx.x * blockDim.x + threadIdx.x; ridx < num_rows; ridx += gridDim.x * blockDim.x)
    {
        IndexType start1 = rowoff1[ridx];
        IndexType end1 = rowoff1[ridx + 1];
        IndexType start2 = start1 * 2 - ridx;
        rowidx2[start2] = ridx;
        colidx2[start2] = ridx;
        values2[start2] = values1[start1];
        for(int i = start1 + 1; i < end1; i++)
        {
            ValueType v = values1[i];
            IndexType col = colidx1[i];
            IndexType loc = start2 + 1 + 2 * (i - start1 - 1);
            rowidx2[loc] = ridx;
            colidx2[loc] = col;
            values2[loc] = v;
            rowidx2[loc + 1] = col;
            colidx2[loc + 1] = ridx;
            values2[loc + 1] = v;
        }
    }
}
|
3af4ba1ac5e1660a126d88627767693adbf71614.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//PASS
//--blockDim=1024 --gridDim=1 --no-inline
// Launch-coordinate bundle (block index, block dim, thread index).
// NOTE(review): this file looks like a GPUVerify regression test (see
// the //PASS header); the struct exists to exercise struct parameters.
struct wrapped {
    unsigned int bidx;
    unsigned int bdim;
    unsigned int tidx;
};
// Verification stub: despite the name it always returns 0.0f -- only
// its identity matters to foo's __requires precondition below.
__device__ float multiplyByTwo(float *v, wrapped tw)
{
    return 0.0f;
}
// Verification stub: always returns 0.0f (see multiplyByTwo).
__device__ float divideByTwo(float *v, wrapped tw)
{
    return 0.0f;
}
typedef float(*funcType)(float*, wrapped);
// Kernel taking a function pointer; the __requires precondition
// constrains f to one of the two known device functions (GPUVerify
// annotation; note the bitwise `|`, harmless on boolean operands).
__global__ void foo(float *v, funcType f, unsigned int size)
{
    __requires(f == multiplyByTwo | f == divideByTwo);
}
| 3af4ba1ac5e1660a126d88627767693adbf71614.cu | //PASS
//--blockDim=1024 --gridDim=1 --no-inline
// Launch-coordinate bundle (block index, block dim, thread index).
// NOTE(review): this file looks like a GPUVerify regression test (see
// the //PASS header); the struct exists to exercise struct parameters.
struct wrapped {
    unsigned int bidx;
    unsigned int bdim;
    unsigned int tidx;
};
// Verification stub: despite the name it always returns 0.0f -- only
// its identity matters to foo's __requires precondition below.
__device__ float multiplyByTwo(float *v, wrapped tw)
{
    return 0.0f;
}
// Verification stub: always returns 0.0f (see multiplyByTwo).
__device__ float divideByTwo(float *v, wrapped tw)
{
    return 0.0f;
}
typedef float(*funcType)(float*, wrapped);
// Kernel taking a function pointer; the __requires precondition
// constrains f to one of the two known device functions (GPUVerify
// annotation; note the bitwise `|`, harmless on boolean operands).
__global__ void foo(float *v, funcType f, unsigned int size)
{
    __requires(f == multiplyByTwo | f == divideByTwo);
}
|
82296dfe7e97cf0bc1830d5a5186915ab20db7c1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
//#include <helper_cuda.h>
#define DEBUG 0
#define ENUM_NUM 17 //16
#define UNKNOWN_NUM 64
#define POLY_NUM 64
#define PARA_NUM 64
#define NONLINEAR_NUM 65
#define SOL_MAX_NUM 200
#define RESULT_MAX_NUM 5
//#define SET_VAL (((value_t)14764175488)<< ENUM_NUM)
#define BLOCK_NUM 32 //2^5
#define THREAD_NUM 256 // 2^8
#define THREADS_SHIFT 13 // (5+8)
typedef long value_t; // to save values of variables.
//typedef unsigned long constpart_t; // the part with no parameters.
typedef unsigned long linearpart_t; // to save 32 unknowns and 1 contants.
typedef unsigned long squarepart_t;
typedef unsigned long oripoly_t;
// Prints `val` as `len` binary digits, least-significant bit first,
// inserting a space after every 5th digit (no trailing newline).
static inline void binary_print(value_t val, int len) {
    for (int bit = 0; bit < len; bit++) {
        printf((val & ((value_t)1 << bit)) ? "1" : "0");
        if ((bit + 1) % 5 == 0) {
            printf(" ");
        }
    }
}
typedef unsigned char UINT8;
typedef unsigned long long UINT64;
__device__ linearpart_t d_linear_mat[ENUM_NUM * POLY_NUM * 2];
__device__ oripoly_t d_polys_mat[NONLINEAR_NUM * (POLY_NUM + UNKNOWN_NUM + 1) * 3];
__device__ squarepart_t d_square_mat[ENUM_NUM * POLY_NUM];
__device__ value_t d_var_all[2560];
// Returns the index of the highest set bit of `val` among bits
// [0, len), or -1 if none of those bits are set.
static inline __host__ __device__ int largestpos(value_t val, int len) {
    int pos = len;
    while (--pos >= 0) {
        if ((val >> pos) & (value_t) 1) {
            return pos;
        }
    }
    return -1;
}
// 128-bit variant of largestpos: the value is split into val0
// (bits 0..63) and val1 (bits 64..127).  For len in (64, 128] the high
// word is searched first; otherwise only the low word is examined
// (lengths above 128 fall back to the low word, as before).
static inline __host__ __device__ int largestpos_2(value_t val0, value_t val1, int len) {
    if (len > 64 && len <= 128) {
        int high = largestpos(val1, len - 64);
        if (high > -1) {
            return high + 64;
        }
    }
    return largestpos(val0, 64);
}
// Gaussian elimination over GF(2) on an augmented linear system stored
// as two 64-bit words per row (bit 0 = constant term, bits
// 1..unknown_num = variables).  Returns the number of solutions found
// (0 if inconsistent) and writes them into `solutions`.
// NOTE(review): with many free variables the 2^k solution expansion can
// exceed SOL_MAX_NUM (200) -- there is no bounds check; confirm callers
// guarantee sufficient rank.
static inline value_t gauss_host(linearpart_t working_mat[POLY_NUM][2],
        const int poly_num, const int unknown_num, value_t solutions[SOL_MAX_NUM]) {
    int pos_arr[POLY_NUM]; // bear revised
    int rank = 0;
    // Forward elimination: clear each pivot column below its pivot row.
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        pos_arr[pi] = largestpos_2(working_mat[pi][0],working_mat[pi][1], unknown_num + 1);
        rank++;
        // Pivot at bit 0 means "1 = 0": the system is inconsistent.
        if (pos_arr[pi] == 0) {
            return 0;
        }
        for (int j = pi + 1; j < POLY_NUM; j++) {
            if(working_mat[j][pos_arr[pi]/64] & ((linearpart_t)1 << (pos_arr[pi] % 64))){
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    // back substitution: clear pivot columns above each pivot row.
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        for (int j = 0; j < pi; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                    & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    if (rank == unknown_num) {
        // Full rank: exactly one solution; read off the constant bits.
        solutions[0] = 0;
        ;
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
                continue;
            }
            if (working_mat[pi][0] & (linearpart_t)1) {
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        return 1;
    } else {
        // Under-determined: enumerate all assignments of the free
        // variables, doubling the solution set per free variable.
        solutions[0] = 0;
        value_t sol_num = 1;
        bool appear[UNKNOWN_NUM + 1] = { 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
                continue;
            }
            appear[pos_arr[pi]] = true;
            if (working_mat[pi][0] & (linearpart_t)1) {
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        // duplicate solutions for each variable without a pivot.
        for (int i = 1; i < UNKNOWN_NUM+1; i++) { // liting revised
            if (appear[i] == false) {
                for (int j = 0; j < sol_num; j++) {
                    // bear revised
                    solutions[j + sol_num] = (solutions[j]) ^ ((value_t)1 << (i-1));
                }
                // bear added: propagate the flipped free variable into
                // every pivot variable whose row contains it.
                for (int pi = 0; pi < POLY_NUM; pi++) {
                    if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
                        continue;
                    }
                    if(i < 64){
                        for (int j = 0; j < sol_num * ((working_mat[pi][0] & (((linearpart_t) 1) << i)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }else{
                        for (int j = 0; j < sol_num * ((working_mat[pi][1] & (((linearpart_t) 1) << i-64)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }
                }
                sol_num *= 2;
            }
        }
        return sol_num;
    }
}
// Device-side twin of gauss_host (same GF(2) elimination; note the
// different argument order).  Returns the number of solutions found
// (0 if inconsistent) and writes them into `solutions`.
// NOTE(review): as in gauss_host, the 2^k free-variable expansion is
// not bounds-checked against SOL_MAX_NUM.
static inline __device__ value_t gauss(value_t solutions[SOL_MAX_NUM], linearpart_t working_mat[POLY_NUM][2],
        const int poly_num, const int unknown_num) {
    // bear revised
    int pos_arr[POLY_NUM]; // bear revised
    int rank = 0;
    // Forward elimination: clear each pivot column below its pivot row.
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        pos_arr[pi] = largestpos_2(working_mat[pi][0], working_mat[pi][1],unknown_num + 1);
        rank++;
        // Pivot at bit 0 means "1 = 0": inconsistent system.
        if (pos_arr[pi] == 0) {
            return 0;
        }
        for (int j = pi + 1; j < POLY_NUM; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                    & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    // back substitution: clear pivot columns above each pivot row.
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        for (int j = 0; j < pi; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                    & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    if (rank == unknown_num) {
        // Full rank: exactly one solution; read off the constant bits.
        solutions[0]= 0;
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
                continue;
            }
            if (working_mat[pi][0] & (linearpart_t) 1) {
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        return 1;
    } else {
        // Under-determined: enumerate all assignments of the free
        // variables, doubling the solution set per free variable.
        solutions[0] = 0;
        value_t sol_num = 1;
        //liting revised
        bool appear[UNKNOWN_NUM + 1] = { 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
                continue;
            }
            appear[pos_arr[pi]] = true;
            if (working_mat[pi][0] & (linearpart_t) 1) {
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        // duplicate solutions for each variable without a pivot.
        for (int i = 1; i < UNKNOWN_NUM + 1; i++) { // liting revised
            if (appear[i] == false) {
                for (int j = 0; j < sol_num; j++) {
                    // bear revised
                    solutions[j + sol_num] = (solutions[j]) ^ ((value_t)1 << (i-1));
                }
                // bear added: propagate the flipped free variable into
                // every pivot variable whose row contains it.
                for (int pi = 0; pi < POLY_NUM; pi++) {
                    if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
                        continue;
                    }
                    if(i < 64){
                        for (int j = 0; j < sol_num * ((working_mat[pi][0] & (((linearpart_t) 1) << i)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }else{
                        for (int j = 0; j < sol_num * ((working_mat[pi][1] & (((linearpart_t) 1) << i-64)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }
                }
                sol_num *= 2;
            }
        }
        return sol_num;
    }
}
// Per-thread exhaustive search: each thread enumerates 2^ENUM_NUM
// parameter settings (Gray-code order, so only one bit changes per
// step), solves the resulting GF(2) linear system with gauss(), and
// scores every solution against the nonlinear polynomials in
// d_polys_mat, keeping the best-scoring (val, solution) pairs in
// `result` and the best score in d_bound (both are resumable state).
__global__ void solveLinear(const linearpart_t *d_working_mat_copy,
        const squarepart_t *d_const_mat, value_t *d_val, char *d_bound, value_t *d_sol_total,value_t* result) {
    int thidx = blockDim.x * blockIdx.x + threadIdx.x;
    value_t val = d_val[thidx];
    char bound = d_bound[thidx];
    value_t res[2 * RESULT_MAX_NUM];
    char res_num = 0;
    linearpart_t working_mat[POLY_NUM][2]; // initialized as the const part of linear matrix. also used as the results of linear part.
    linearpart_t working_mat_copy[POLY_NUM][2];
    squarepart_t const_mat[POLY_NUM];
    d_sol_total[thidx] = 0;
    oripoly_t cstpoly[3] = {0, 0, 0};
    //copy this thread's state in from device-global arrays
    for(int i = 0; i < 2 * RESULT_MAX_NUM; i++){
        res[i] = 0;
    }
    res[0] = result[thidx * 2 * RESULT_MAX_NUM];
    res[1] = result[thidx * 2 * RESULT_MAX_NUM + 1];
    for (int i = 0; i < POLY_NUM; i++) {
        working_mat_copy[i][0] = d_working_mat_copy[thidx * POLY_NUM * 2 + i*2];
        working_mat_copy[i][1] = d_working_mat_copy[thidx * POLY_NUM * 2 + i*2 + 1];
        const_mat[i] = d_const_mat[thidx * POLY_NUM + i];
    }
    //todo to be finished!!!!
    // for(int i=0; i< NONLINEAR_NUM;i++){
    // for(int j = 0; j < PARA_NUM + UNKNOWN_NUM + 1; j++){
    // polys[i][j][0] = d_polys_mat[i * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + j * 3];
    // polys[i][j][1] = d_polys_mat[i * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + j * 3 + 1];
    // polys[i][j][2] = d_polys_mat[i * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + j * 3 + 2];
    //
    // }
    // }
    // main loop over all 2^ENUM_NUM enumerated parameter settings.
    for (value_t count = 1; count < (1 << ENUM_NUM); count++) {
        // generate the next gray code: `pos` is the single bit that
        // flips between step count-1 and step count.
        int pos = 64-__ffsll(__brevll(count ^ (count - 1)));
        val = val ^ ((value_t) 1 << pos);
        // Incrementally update the linear system for the flipped bit.
        for (int pi = 0; pi < POLY_NUM; pi++) {
            working_mat_copy[pi][0] ^= d_linear_mat[pos * POLY_NUM * 2 + pi * 2];
            working_mat_copy[pi][1] ^= d_linear_mat[pos * POLY_NUM * 2 + pi * 2 + 1];
            const_mat[pi] ^= d_square_mat[pos * POLY_NUM + pi];
            working_mat[pi][0] = working_mat_copy[pi][0];
            working_mat[pi][1] = working_mat_copy[pi][1];
            // Fold the quadratic contribution (parity of const_mat & val)
            // into the constant bit of the row.
            value_t w = const_mat[pi] & val;
            working_mat[pi][0] ^= (bool)((__popcll((unsigned long long int)w)) & (value_t) 1);
        }
        value_t solutions[SOL_MAX_NUM];
        value_t sol_num = 0;
        // gauss elimination of the current linear system
        sol_num = gauss(solutions, working_mat, POLY_NUM, UNKNOWN_NUM);
        d_sol_total[thidx] += sol_num;
        // verify each solution on the nonlinear polynomials.
        for(int s = 0;s < sol_num;s++){
            int zero_num = 0;
            int one_num = 0;
            int op;
            for (op = 0; op < NONLINEAR_NUM; op++) {
                // Start from the constant column of polynomial `op`.
                cstpoly[0] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3];
                cstpoly[1] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3 + 1];
                cstpoly[2] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3 + 2];
                // cstpoly[0] = polys[op][PARA_NUM + UNKNOWN_NUM][0];
                // cstpoly[1] = polys[op][PARA_NUM + UNKNOWN_NUM][1];
                // cstpoly[2] = polys[op][PARA_NUM + UNKNOWN_NUM][2];
                // accumulate the columns selected by the parameter bits.
                for (int pa = 0; pa < PARA_NUM; pa++) {
                    if (val & ((value_t) 1 << pa)) {
                        cstpoly[0] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3];
                        cstpoly[1] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3 + 1];
                        cstpoly[2] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3 + 2];
                        // cstpoly[0] ^= polys[op][PARA_NUM - pa -1][0];
                        // cstpoly[1] ^= polys[op][PARA_NUM - pa -1][1];
                        // cstpoly[2] ^= polys[op][PARA_NUM - pa -1][2];
                    }
                }
                // accumulate the columns selected by the solution bits.
                for (int un = 0; un < UNKNOWN_NUM; un++) {
                    if (solutions[s] & ((value_t) 1 << un)) {
                        cstpoly[0] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3];
                        cstpoly[1] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3 + 1];
                        cstpoly[2] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3 + 2];
                        // cstpoly[0] ^= polys[op][ PARA_NUM + un][0];
                        // cstpoly[1] ^= polys[op][ PARA_NUM + un][1];
                        // cstpoly[2] ^= polys[op][ PARA_NUM + un][2];
                    }
                }
                // evaluate: parity of the masked accumulators.
                cstpoly[0] = cstpoly[0] & val;
                cstpoly[1] = cstpoly[1] & solutions[s];
                cstpoly[2] = cstpoly[2] & ((oripoly_t)1);
                value_t w = cstpoly[0] ^ cstpoly[1] ^ cstpoly[2];
                w = (w) ^ (w >> 32);
                w = (w) ^ (w >> 16);
                w = (w) ^ (w >> 8);
                w = (w) ^ (w >> 4);
                w = (w) ^ (w >> 2);
                w = (w) ^ (w >> 1);
                if (w & (value_t) 1) {
                    zero_num ++;
                    // Early exit: can no longer beat or match `bound`.
                    if(zero_num > NONLINEAR_NUM - bound){
                        break;
                    }
                }else{
                    one_num++;
                }
            }
            // Record candidates that match or improve the best score.
            if(zero_num + one_num == NONLINEAR_NUM && one_num >= bound){
                if(one_num > bound){
                    bound = one_num;
                    res_num = 0;
                    for(int ir = 0; ir < 2 * RESULT_MAX_NUM; ir++ ){
                        res[ir] = 0;
                    }
                }
                if(res_num < RESULT_MAX_NUM){
                    res[res_num * 2] = val;
                    res[res_num * 2 + 1 ] = solutions[s];
                    res_num ++;
                }
                // printf("\nval:%lu,sol:%lu,count:%d(bound:%d) thidx:%d \n",val,solutions[s],one_num,bound, thidx);
            }
        }
    }
    // Write the updated best score and result list back out.
    d_bound[thidx] = bound;
    for(int i = 0; i < 2 * RESULT_MAX_NUM; i++){
        result[thidx * 2 * RESULT_MAX_NUM + i] = res[i];
    }
}
int main(int argc, char** argv) {
char bound = 0;
const int para_num = PARA_NUM;
const int enum_num = ENUM_NUM;
const int ori_num = NONLINEAR_NUM;
//const int set_num = para_num - enum_num;
value_t set_val = atol(argv[1])<<THREADS_SHIFT;
// value_t set_val = 0;
const int poly_num = POLY_NUM;
const int unknown_num = UNKNOWN_NUM;
linearpart_t linear_mat[para_num][poly_num][2];
linearpart_t working_mat[poly_num][2]; // initialized as the const part of linear matrix. also used as the results of linear part.
linearpart_t working_mat_copy[poly_num][2];
linearpart_t working_mat_file[poly_num][2];
squarepart_t square_mat[para_num][poly_num];
squarepart_t const_mat[poly_num]; // used to compute the const part from square polys.
oripoly_t polys[ori_num][para_num + unknown_num + 1][3];
oripoly_t cstpoly[3];
// hipSetDevice(0);
hipSetDevice(atoi(argv[2]));
FILE *in1 = fopen("../data/mat_files/linear_mat.txt", "r+");
FILE *in2 = fopen("../data/mat_files/square_mat.txt", "r+");
FILE *in3 = fopen("../data/mat_files/poly_mat.txt", "r+");
FILE *in4 = fopen("../data/mat_files/working_mat.txt", "r+");
FILE *out = fopen("../data/mat_files/solve_mat_result.txt","a+");
char c1, c2, c3, c4;
for (int i = 0; i < para_num; i++) {
for (int j = 0; j < poly_num; j++) {
linear_mat[i][j][0] = 0;
linear_mat[i][j][1] = 0;
square_mat[i][j] = 0;
for (int k = 0; k < 128; k++) {
fscanf(in1, "%c", &c1);
while (c1 != '0' && c1 != '1') {
fscanf(in1, "%c", &c1);
}
if (c1 == '1') {
linear_mat[i][j][k/64] ^= ((linearpart_t) 1 << (k-((int)k/64)*64));
}
}
for (int k = 0; k < para_num; k++) {
fscanf(in2, "%c", &c2);
while (c2 != '0' && c2 != '1') {
fscanf(in2, "%c", &c2);
}
if (c2 == '1') {
square_mat[i][j] ^= ((squarepart_t) 1 << (para_num - 1) - k);
}
}
}
}
for (int i = 0; i < ori_num; i++) {
for (int j = 0; j < para_num + unknown_num + 1; j++) {
polys[i][j][0] = 0;
polys[i][j][1] = 0;
polys[i][j][2] = 0;
for (int k = 0; k < 192; k++) {
fscanf(in3, "%c", &c3);
while (c3 != '0' && c3 != '1') {
fscanf(in3, "%c", &c3);
}
if (k < para_num && c3 == '1') {
polys[i][j][0] ^= ((oripoly_t) 1 << (para_num - k -1));
} else if (k >= para_num && k<para_num+unknown_num && c3 == '1') {
polys[i][j][1] ^= ((oripoly_t) 1 << (k - para_num));
}else if(c3 == '1'){
polys[i][j][2] ^= ((oripoly_t) 1);
}
}
}
}
for (int i = 0; i < poly_num; i++) {
working_mat[i][0] = 0;
working_mat[i][1] = 0;
for (int j = 0; j < 128; j++) {
fscanf(in4, "%c", &c4);
while (c4 != '0' && c4 != '1') {
fscanf(in4, "%c", &c4);
}
if (c4 == '1') {
working_mat[i][(int)j/64] ^= ((linearpart_t) 1 << (j - ((int)j/64)*64));
}
}
working_mat_file[i][0] = working_mat[i][0];
working_mat_file[i][1] = working_mat[i][1];
}
fclose(in1);
fclose(in2);
fclose(in3);
fclose(in4);
printf("finish reading file!\n");
linearpart_t linear_mat_enum[ENUM_NUM * POLY_NUM * 2];
squarepart_t square_mat_enum[ENUM_NUM * POLY_NUM];
oripoly_t polys_mat_enum[ori_num * (para_num + unknown_num + 1) * 3 ];
for (int i = 0; i < ENUM_NUM; i++) {
for (int j = 0; j < POLY_NUM; j++) {
for (int k = 0; k < 2; k++) {
linear_mat_enum[i * POLY_NUM * 2 + j * 2 + k] =
linear_mat[i][j][k];
}
}
}
for (int i = 0; i < ENUM_NUM; i++) {
for (int j = 0; j < POLY_NUM; j++) {
square_mat_enum[i * POLY_NUM + j] = square_mat[i][j];
}
}
for(int i = 0; i < ori_num; i++){
for(int j = 0; j < para_num + unknown_num + 1; j++){
for(int k = 0; k < 3; k++){
polys_mat_enum[i * (para_num + unknown_num + 1) * 3 + j * 3 + k] = polys[i][j][k];
}
}
}
hipMemcpyToSymbol(d_linear_mat, linear_mat_enum,
2 * ENUM_NUM * POLY_NUM * sizeof(linearpart_t));
hipMemcpyToSymbol(d_square_mat, square_mat_enum,
ENUM_NUM * POLY_NUM * sizeof(squarepart_t));
hipMemcpyToSymbol(d_polys_mat, polys_mat_enum,
3 * NONLINEAR_NUM * (PARA_NUM + UNKNOWN_NUM + 1) * sizeof(oripoly_t));
printf("finish copying device memory!\n");
hipError_t err = hipSuccess;
int thidx = BLOCK_NUM * THREAD_NUM;
value_t *d_val = NULL;
err = hipMalloc((void **) &d_val, thidx * sizeof(value_t));
if (err != hipSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
char *d_bound = NULL;
err = hipMalloc((void **) &d_bound, thidx * sizeof(char));
if (err != hipSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t *d_sol_total = NULL;
err = hipMalloc((void **) &d_sol_total, thidx * sizeof(value_t));
if (err != hipSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
linearpart_t *d_working_mat_copy = NULL;
err = hipMalloc((void **) &d_working_mat_copy,
thidx * poly_num * 2 * sizeof(linearpart_t));
if (err != hipSuccess) {
fprintf(stderr,
"Failed to allocate device working_mat_copy (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
squarepart_t *d_const_mat = NULL;
err = hipMalloc((void **) &d_const_mat,
thidx * poly_num * sizeof(squarepart_t));
if (err != hipSuccess) {
fprintf(stderr,
"Failed to allocate devices const_mat (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
if (err != hipSuccess) {
fprintf(stderr,
"Failed to copy oripolys from host to device (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t h_result[thidx * RESULT_MAX_NUM * 2];
for(int i = 0; i < thidx * RESULT_MAX_NUM * 2 ; i ++){
h_result[i] = 0;
}
printf("finish allocate device memory!\n");
int s_total_p0 = 0;
value_t *val_arr = (value_t*)calloc(thidx, sizeof(value_t));
linearpart_t *working_mat_copy_arr = (linearpart_t*)calloc(thidx * POLY_NUM * 2, sizeof(linearpart_t));
squarepart_t *const_mat_arr = (squarepart_t*)calloc(thidx * POLY_NUM, sizeof(squarepart_t));
char *bound_arr = (char*)calloc(thidx, sizeof(short));
for (int thi = 0; thi < thidx; thi++) {
value_t sol_num = 0;
value_t solutions[SOL_MAX_NUM];
//int sol_total = 0;
value_t val = (set_val + (value_t) thi) << ENUM_NUM;
val_arr[thi] = val;
for (int pi = 0; pi < POLY_NUM; pi++) {
working_mat[pi][0] = working_mat_file[pi][0];
working_mat[pi][1] = working_mat_file[pi][1];
const_mat[pi] = 0;
}
for (int pos = enum_num; pos < para_num; pos++) {
if (val & ((value_t) 1 << pos)) {
for (int pi = 0; pi < poly_num; pi++) {
working_mat[pi][0] ^= linear_mat[pos][pi][0];
working_mat[pi][1] ^= linear_mat[pos][pi][1];
}
for (int pi = 0; pi < poly_num; pi++) {
const_mat[pi] ^= square_mat[pos][pi];
}
}
}
for (int i = 0; i < poly_num; i++) {
working_mat_copy[i][0] = working_mat[i][0];
working_mat_copy[i][1] = working_mat[i][1];
working_mat_copy_arr[thi * POLY_NUM * 2 + 2 * i] = working_mat_copy[i][0];
working_mat_copy_arr[thi * POLY_NUM * 2 + 2 * i + 1] = working_mat_copy[i][1];
const_mat_arr[thi * POLY_NUM + i] = const_mat[i];
}
for (int pi = 0; pi < poly_num; pi++) {
value_t w = const_mat[pi] & val;
w = (w) ^ (w >> 32);
w = (w) ^ (w >> 16);
w = (w) ^ (w >> 8);
w = (w) ^ (w >> 4);
w = (w) ^ (w >> 2);
w = (w) ^ (w >> 1);
if (w & (value_t) 1) {
working_mat[pi][0] ^= (linearpart_t) 1;
}
}
sol_num = gauss_host(working_mat, POLY_NUM, UNKNOWN_NUM, solutions);
s_total_p0 += sol_num;
//verify the solutions
for (int s = 0; s < sol_num; s++) {
int one_num = 0;
int zero_num = 0;
int op;
for (op = 0; op < ori_num; op++) {
cstpoly[0] = polys[op][para_num + unknown_num][0];
cstpoly[1] = polys[op][para_num + unknown_num][1];
cstpoly[2] = polys[op][para_num + unknown_num][2];
// for parameters.
for (int pa = 0; pa < para_num; pa++) {
if (val & ((value_t) 1 << pa)) {
cstpoly[0] ^= polys[op][para_num - pa - 1][0];
cstpoly[1] ^= polys[op][para_num - pa - 1][1];
cstpoly[2] ^= polys[op][para_num - pa - 1][2];
}
}
for (int un = 0; un < unknown_num; un++) {
if (solutions[s] & ((value_t) 1 << un)) {
cstpoly[0] ^= polys[op][para_num + un][0];
cstpoly[1] ^= polys[op][para_num + un][1];
cstpoly[2] ^= polys[op][para_num + un][2];
}
}
cstpoly[2] ^= polys[op][unknown_num + para_num][2];
// evaluate
cstpoly[0] = cstpoly[0] & val;
cstpoly[1] = cstpoly[1] & solutions[s];
cstpoly[2] = cstpoly[2] & ((oripoly_t) 1);
value_t w = cstpoly[0] ^ cstpoly[1] ^ cstpoly[2];
w = (w) ^ (w >> 32);
w = (w) ^ (w >> 16);
w = (w) ^ (w >> 8);
w = (w) ^ (w >> 4);
w = (w) ^ (w >> 2);
w = (w) ^ (w >> 1);
if (w & (value_t) 1) {
zero_num++;
if (zero_num > ori_num - bound) {
break;
}
} else {
one_num++;
}
}
if (zero_num + one_num == ori_num && one_num > bound) {
//fprintf(out, "n=%d,val=0x%016x,sol=0x%016x\n",one_num, val, solutions[s]);
h_result[thi * RESULT_MAX_NUM * 2] = val;
h_result[thi * RESULT_MAX_NUM * 2 + 1] = solutions[s];
fprintf(out,"n=%d,val=0x",one_num);
//fprintf(out,"val=0x%016x,sol=0x%016x\n",h_result[i * 2 * RESULT_MAX_NUM + j * 2],h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1]);
fprintf(out, "%08x", (unsigned int)(val >> 32));
fprintf(out, "%08x,", (unsigned int)(val & 0xFFFFFFFFULL));
fprintf(out, "sol=0x");
fprintf(out, "%08x", (unsigned int)(solutions[s] >> 32));
fprintf(out, "%08x\n", (unsigned int)(solutions[s] & 0xFFFFFFFFULL));
}
if (one_num > bound) {
bound = one_num;
bound_arr[thi] = bound;
}
}
}
for(int i = 0; i < thidx;i++){
if(bound_arr[i] < bound){
bound_arr[i] = bound;
h_result[i * RESULT_MAX_NUM * 2] = 0;
h_result[i * RESULT_MAX_NUM * 2 + 1] = 0;
}
}
// for(int i = 0; i < thidx;i++){
// printf("No. %d, bound:%d, val:%lu, sol:%lu\n",i,bound_arr[i],h_result[i * RESULT_MAX_NUM * 2],h_result[i * RESULT_MAX_NUM * 2+1]);
// }
printf("finish cpu computing! the bound is %d now...\n", bound);
//begin device part
err = hipMemcpy(d_val, val_arr, thidx * sizeof(value_t),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("Failed to copy value from host to device (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_bound, bound_arr, thidx * sizeof(char),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("Failed to copy bound from host to device (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t *d_result = NULL;
err = hipMalloc((void **) &d_result, thidx * RESULT_MAX_NUM * 2 * sizeof(value_t));
if (err != hipSuccess) {
fprintf(stderr,
"Failed to allocate devices result (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_result, h_result,
thidx * RESULT_MAX_NUM * 2 * sizeof(value_t),hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr,
"Failed to copy result from host to device (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_working_mat_copy, working_mat_copy_arr,
thidx * 2 * poly_num * sizeof(linearpart_t), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr,
"Failed to copy working_mat_copy from host to device (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_const_mat, const_mat_arr,
thidx * poly_num * sizeof(squarepart_t), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr,
"Failed to copy const_mat from host to device (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("enum num : %d\nblock num : %d\nthread num : %d\n", ENUM_NUM,
BLOCK_NUM, THREAD_NUM);
hipEvent_t start1;
hipEventCreate(&start1);
hipEvent_t stop1;
hipEventCreate(&stop1);
hipEventRecord(start1, NULL);
printf("begin solve linear system!\n");
hipLaunchKernelGGL(( solveLinear), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, d_working_mat_copy, d_const_mat,
d_val, d_bound, d_sol_total,d_result);
hipEventRecord(stop1, NULL);
hipEventSynchronize(stop1);
float msecTotal1 = 0.0f;
hipEventElapsedTime(&msecTotal1, start1, stop1);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch solveLinear kernel (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t h_sol_total[thidx];
err = hipMemcpy(h_sol_total, d_sol_total, thidx * sizeof(value_t),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr,
"Failed to copy total solution numbers from device to host (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_result, d_result, thidx * RESULT_MAX_NUM * 2 * sizeof(value_t),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr,
"Failed to copy result from device to host (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(bound_arr, d_bound, thidx * sizeof(char),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr,
"Failed to copy bound from device to host (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("\n------------ begin sort ---------------\n");
// for(int i = 0; i < thidx; i++){
// printf("bound:%d\n",bound_arr[i]);
// printf("1. val:%lu, sol:%lu\n", h_result[i * 10],h_result[i*10 + 1]);
// printf("2. val:%lu, sol:%lu\n", h_result[i * 10 + 2],h_result[i*10 + 3]);
// printf("3. val:%lu, sol:%lu\n", h_result[i * 10 + 4],h_result[i*10 + 5]);
// printf("4. val:%lu, sol:%lu\n", h_result[i * 10 + 6],h_result[i*10 + 7]);
// printf("5. val:%lu, sol:%lu\n", h_result[i * 10 + 8],h_result[i*10 + 9]);
//
// }
for(int i = 0; i < thidx; i++){
for(int j = i + 1; j < thidx; j++){
if(bound_arr[i] > bound_arr[j]){
char temp = bound_arr[i];
bound_arr[i] = bound_arr[j];
bound_arr[j] = temp;
for(int ri = 0;ri < 2 * RESULT_MAX_NUM; ri ++){
value_t temp = h_result[i * 2 * RESULT_MAX_NUM + ri];
h_result[i * 2 * RESULT_MAX_NUM + ri] = h_result[j * 2 * RESULT_MAX_NUM + ri];
h_result[j * 2 * RESULT_MAX_NUM + ri] = temp;
}
}
}
}
// printf("\n------------ finish sort ---------------\n");
printf("bound:%d\n",bound_arr[thidx-1]);
// printf("1. val:%lu, sol:%lu\n", h_result[i * 10],h_result[i*10 + 1]);
// printf("2. val:%lu, sol:%lu\n", h_result[i * 10 + 2],h_result[i*10 + 3]);
// printf("3. val:%lu, sol:%lu\n", h_result[i * 10 + 4],h_result[i*10 + 5]);
// printf("4. val:%lu, sol:%lu\n", h_result[i * 10 + 6],h_result[i*10 + 7]);
// printf("5. val:%lu, sol:%lu\n", h_result[i * 10 + 8],h_result[i*10 + 9]);
for(int i = thidx - 1;i >= 0;i--){
for(int j = 0,k = 0; j < RESULT_MAX_NUM; j++, k++){
if(h_result[i * 2 * RESULT_MAX_NUM + j * 2] != 0 && h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1]!= 0){
fprintf(out,"n=%d,val=0x",bound_arr[i]);
//fprintf(out,"val=0x%016x,sol=0x%016x\n",h_result[i * 2 * RESULT_MAX_NUM + j * 2],h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1]);
fprintf(out, "%08x", (unsigned int)(h_result[i * 2 * RESULT_MAX_NUM + j * 2] >> 32));
fprintf(out, "%08x,", (unsigned int)(h_result[i * 2 * RESULT_MAX_NUM + j * 2] & 0xFFFFFFFFULL));
fprintf(out, "sol=0x");
fprintf(out, "%08x", (unsigned int)(h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1] >> 32));
fprintf(out, "%08x\n", (unsigned int)(h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1] & 0xFFFFFFFFULL));
}
// fprintf(out,"2. val:%lu, sol:%lu\n", h_result[i * 2 * RESULT_MAX_NUM + 2],h_result[i*2 * RESULT_MAX_NUM + 3]);
}
if(bound_arr[i] > bound_arr[i-1]){
break;
}
}
printf("time:%.3lf ms\n---------------------------------------\n", msecTotal1);
hipFree(d_working_mat_copy);
hipFree(d_const_mat);
hipFree(d_val);
hipFree(d_bound);
hipFree(d_sol_total);
hipFree(d_result);
}
| 82296dfe7e97cf0bc1830d5a5186915ab20db7c1.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
//#include <helper_cuda.h>
#define DEBUG 0
#define ENUM_NUM 17 //16
#define UNKNOWN_NUM 64
#define POLY_NUM 64
#define PARA_NUM 64
#define NONLINEAR_NUM 65
#define SOL_MAX_NUM 200
#define RESULT_MAX_NUM 5
//#define SET_VAL (((value_t)14764175488)<< ENUM_NUM)
#define BLOCK_NUM 32 //2^5
#define THREAD_NUM 256 // 2^8
#define THREADS_SHIFT 13 // (5+8)
typedef long value_t; // to save values of variables.
//typedef unsigned long constpart_t; // the part with no parameters.
typedef unsigned long linearpart_t; // to save 32 unknowns and 1 contants.
typedef unsigned long squarepart_t;
typedef unsigned long oripoly_t;
static inline void binary_print(value_t val, int len) {
for (int i = 0; i < len; i++) {
if (val & ((value_t)1 << i)) {
printf("1");
} else {
printf("0");
}
if ((i + 1) % 5 == 0) {
printf(" ");
}
}
}
typedef unsigned char UINT8;
typedef unsigned long long UINT64;
__device__ linearpart_t d_linear_mat[ENUM_NUM * POLY_NUM * 2];
__device__ oripoly_t d_polys_mat[NONLINEAR_NUM * (POLY_NUM + UNKNOWN_NUM + 1) * 3];
__device__ squarepart_t d_square_mat[ENUM_NUM * POLY_NUM];
__device__ value_t d_var_all[2560];
static inline __host__ __device__ int largestpos(value_t val, int len) {
for (int i = len - 1; i >= 0; i--) {
if (val & ((value_t) 1 << i)) {
return i;
}
}
return -1;
}
static inline __host__ __device__ int largestpos_2(value_t val0, value_t val1, int len) {
int p = 0;
if (len > 64 && len <= 128) {
p = largestpos(val1, len - 64);
if (p > -1) {
return p + 64;
} else {
p = largestpos(val0, 64);
}
} else {
p = largestpos(val0, 64);
}
return p;
}
static inline value_t gauss_host(linearpart_t working_mat[POLY_NUM][2],
const int poly_num, const int unknown_num, value_t solutions[SOL_MAX_NUM]) {
int pos_arr[POLY_NUM]; // bear revised
int rank = 0;
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
continue;
}
pos_arr[pi] = largestpos_2(working_mat[pi][0],working_mat[pi][1], unknown_num + 1);
rank++;
if (pos_arr[pi] == 0) {
return 0;
}
for (int j = pi + 1; j < POLY_NUM; j++) {
if(working_mat[j][pos_arr[pi]/64] & ((linearpart_t)1 << (pos_arr[pi] % 64))){
working_mat[j][0] ^= (working_mat[pi][0]);
working_mat[j][1] ^= (working_mat[pi][1]);
}
}
}
// back
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
continue;
}
for (int j = 0; j < pi; j++) {
if (working_mat[j][pos_arr[pi] / 64]
& ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
working_mat[j][0] ^= (working_mat[pi][0]);
working_mat[j][1] ^= (working_mat[pi][1]);
}
}
}
if (rank == unknown_num) {
// only one solution.
solutions[0] = 0;
;
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
continue;
}
if (working_mat[pi][0] & (linearpart_t)1) {
solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
}
}
return 1;
} else {
// now troubles come
solutions[0] = 0;
value_t sol_num = 1;
bool appear[UNKNOWN_NUM + 1] = { 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
continue;
}
appear[pos_arr[pi]] = true;
if (working_mat[pi][0] & (linearpart_t)1) {
solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
}
}
// duplicate solutions.
for (int i = 1; i < UNKNOWN_NUM+1; i++) { // liting revised
if (appear[i] == false) {
for (int j = 0; j < sol_num; j++) {
// bear revised
solutions[j + sol_num] = (solutions[j]) ^ ((value_t)1 << (i-1));
}
// bear added
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
continue;
}
if(i < 64){
for (int j = 0; j < sol_num * ((working_mat[pi][0] & (((linearpart_t) 1) << i)) != 0); j++) {
solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
}
}else{
for (int j = 0; j < sol_num * ((working_mat[pi][1] & (((linearpart_t) 1) << i-64)) != 0); j++) {
solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
}
}
}
sol_num *= 2;
}
}
return sol_num;
}
}
static inline __device__ value_t gauss(value_t solutions[SOL_MAX_NUM], linearpart_t working_mat[POLY_NUM][2],
const int poly_num, const int unknown_num) {
// bear revised
int pos_arr[POLY_NUM]; // bear revised
int rank = 0;
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
continue;
}
pos_arr[pi] = largestpos_2(working_mat[pi][0], working_mat[pi][1],unknown_num + 1);
rank++;
if (pos_arr[pi] == 0) {
return 0;
}
for (int j = pi + 1; j < POLY_NUM; j++) {
if (working_mat[j][pos_arr[pi] / 64]
& ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
working_mat[j][0] ^= (working_mat[pi][0]);
working_mat[j][1] ^= (working_mat[pi][1]);
}
}
}
// back
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
continue;
}
for (int j = 0; j < pi; j++) {
if (working_mat[j][pos_arr[pi] / 64]
& ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
working_mat[j][0] ^= (working_mat[pi][0]);
working_mat[j][1] ^= (working_mat[pi][1]);
}
}
}
if (rank == unknown_num) {
// only one solution.
solutions[0]= 0;
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
continue;
}
if (working_mat[pi][0] & (linearpart_t) 1) {
solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
}
}
return 1;
} else {
// now troubles come
// now troubles come
solutions[0] = 0;
value_t sol_num = 1;
//liting revised
bool appear[UNKNOWN_NUM + 1] = { 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
continue;
}
appear[pos_arr[pi]] = true;
if (working_mat[pi][0] & (linearpart_t) 1) {
solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
}
}
// duplicate solutions.
for (int i = 1; i < UNKNOWN_NUM + 1; i++) { // liting revised
if (appear[i] == false) {
for (int j = 0; j < sol_num; j++) {
// bear revised
solutions[j + sol_num] = (solutions[j]) ^ ((value_t)1 << (i-1));
}
// bear added
for (int pi = 0; pi < POLY_NUM; pi++) {
if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
continue;
}
if(i < 64){
for (int j = 0; j < sol_num * ((working_mat[pi][0] & (((linearpart_t) 1) << i)) != 0); j++) {
solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
}
}else{
for (int j = 0; j < sol_num * ((working_mat[pi][1] & (((linearpart_t) 1) << i-64)) != 0); j++) {
solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
}
}
}
sol_num *= 2;
}
}
return sol_num;
}
}
__global__ void solveLinear(const linearpart_t *d_working_mat_copy,
const squarepart_t *d_const_mat, value_t *d_val, char *d_bound, value_t *d_sol_total,value_t* result) {
int thidx = blockDim.x * blockIdx.x + threadIdx.x;
value_t val = d_val[thidx];
char bound = d_bound[thidx];
value_t res[2 * RESULT_MAX_NUM];
char res_num = 0;
linearpart_t working_mat[POLY_NUM][2]; // initialized as the const part of linear matrix. also used as the results of linear part.
linearpart_t working_mat_copy[POLY_NUM][2];
squarepart_t const_mat[POLY_NUM];
d_sol_total[thidx] = 0;
oripoly_t cstpoly[3] = {0, 0, 0};
//copy data from device
for(int i = 0; i < 2 * RESULT_MAX_NUM; i++){
res[i] = 0;
}
res[0] = result[thidx * 2 * RESULT_MAX_NUM];
res[1] = result[thidx * 2 * RESULT_MAX_NUM + 1];
for (int i = 0; i < POLY_NUM; i++) {
working_mat_copy[i][0] = d_working_mat_copy[thidx * POLY_NUM * 2 + i*2];
working_mat_copy[i][1] = d_working_mat_copy[thidx * POLY_NUM * 2 + i*2 + 1];
const_mat[i] = d_const_mat[thidx * POLY_NUM + i];
}
//todo to be finished!!!!
// for(int i=0; i< NONLINEAR_NUM;i++){
// for(int j = 0; j < PARA_NUM + UNKNOWN_NUM + 1; j++){
// polys[i][j][0] = d_polys_mat[i * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + j * 3];
// polys[i][j][1] = d_polys_mat[i * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + j * 3 + 1];
// polys[i][j][2] = d_polys_mat[i * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + j * 3 + 2];
//
// }
// }
// main loop.
for (value_t count = 1; count < (1 << ENUM_NUM); count++) {
// generate the next gray code
int pos = 64-__ffsll(__brevll(count ^ (count - 1)));
val = val ^ ((value_t) 1 << pos);
for (int pi = 0; pi < POLY_NUM; pi++) {
working_mat_copy[pi][0] ^= d_linear_mat[pos * POLY_NUM * 2 + pi * 2];
working_mat_copy[pi][1] ^= d_linear_mat[pos * POLY_NUM * 2 + pi * 2 + 1];
const_mat[pi] ^= d_square_mat[pos * POLY_NUM + pi];
working_mat[pi][0] = working_mat_copy[pi][0];
working_mat[pi][1] = working_mat_copy[pi][1];
value_t w = const_mat[pi] & val;
working_mat[pi][0] ^= (bool)((__popcll((unsigned long long int)w)) & (value_t) 1);
}
value_t solutions[SOL_MAX_NUM];
value_t sol_num = 0;
// gauss
sol_num = gauss(solutions, working_mat, POLY_NUM, UNKNOWN_NUM);
d_sol_total[thidx] += sol_num;
// verify on 3 round keccak.
for(int s = 0;s < sol_num;s++){
int zero_num = 0;
int one_num = 0;
int op;
for (op = 0; op < NONLINEAR_NUM; op++) {
cstpoly[0] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3];
cstpoly[1] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3 + 1];
cstpoly[2] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3 + 2];
// cstpoly[0] = polys[op][PARA_NUM + UNKNOWN_NUM][0];
// cstpoly[1] = polys[op][PARA_NUM + UNKNOWN_NUM][1];
// cstpoly[2] = polys[op][PARA_NUM + UNKNOWN_NUM][2];
// for parameters.
for (int pa = 0; pa < PARA_NUM; pa++) {
if (val & ((value_t) 1 << pa)) {
cstpoly[0] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3];
cstpoly[1] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3 + 1];
cstpoly[2] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3 + 2];
// cstpoly[0] ^= polys[op][PARA_NUM - pa -1][0];
// cstpoly[1] ^= polys[op][PARA_NUM - pa -1][1];
// cstpoly[2] ^= polys[op][PARA_NUM - pa -1][2];
}
}
for (int un = 0; un < UNKNOWN_NUM; un++) {
if (solutions[s] & ((value_t) 1 << un)) {
cstpoly[0] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3];
cstpoly[1] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3 + 1];
cstpoly[2] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3 + 2];
// cstpoly[0] ^= polys[op][ PARA_NUM + un][0];
// cstpoly[1] ^= polys[op][ PARA_NUM + un][1];
// cstpoly[2] ^= polys[op][ PARA_NUM + un][2];
}
}
// evaluate
cstpoly[0] = cstpoly[0] & val;
cstpoly[1] = cstpoly[1] & solutions[s];
cstpoly[2] = cstpoly[2] & ((oripoly_t)1);
value_t w = cstpoly[0] ^ cstpoly[1] ^ cstpoly[2];
w = (w) ^ (w >> 32);
w = (w) ^ (w >> 16);
w = (w) ^ (w >> 8);
w = (w) ^ (w >> 4);
w = (w) ^ (w >> 2);
w = (w) ^ (w >> 1);
if (w & (value_t) 1) {
zero_num ++;
if(zero_num > NONLINEAR_NUM - bound){
break;
}
}else{
one_num++;
}
}
if(zero_num + one_num == NONLINEAR_NUM && one_num >= bound){
if(one_num > bound){
bound = one_num;
res_num = 0;
for(int ir = 0; ir < 2 * RESULT_MAX_NUM; ir++ ){
res[ir] = 0;
}
}
if(res_num < RESULT_MAX_NUM){
res[res_num * 2] = val;
res[res_num * 2 + 1 ] = solutions[s];
res_num ++;
}
// printf("\nval:%lu,sol:%lu,count:%d(bound:%d) thidx:%d \n",val,solutions[s],one_num,bound, thidx);
}
}
}
d_bound[thidx] = bound;
for(int i = 0; i < 2 * RESULT_MAX_NUM; i++){
result[thidx * 2 * RESULT_MAX_NUM + i] = res[i];
}
}
int main(int argc, char** argv) {
char bound = 0;
const int para_num = PARA_NUM;
const int enum_num = ENUM_NUM;
const int ori_num = NONLINEAR_NUM;
//const int set_num = para_num - enum_num;
value_t set_val = atol(argv[1])<<THREADS_SHIFT;
// value_t set_val = 0;
const int poly_num = POLY_NUM;
const int unknown_num = UNKNOWN_NUM;
linearpart_t linear_mat[para_num][poly_num][2];
linearpart_t working_mat[poly_num][2]; // initialized as the const part of linear matrix. also used as the results of linear part.
linearpart_t working_mat_copy[poly_num][2];
linearpart_t working_mat_file[poly_num][2];
squarepart_t square_mat[para_num][poly_num];
squarepart_t const_mat[poly_num]; // used to compute the const part from square polys.
oripoly_t polys[ori_num][para_num + unknown_num + 1][3];
oripoly_t cstpoly[3];
// cudaSetDevice(0);
cudaSetDevice(atoi(argv[2]));
FILE *in1 = fopen("../data/mat_files/linear_mat.txt", "r+");
FILE *in2 = fopen("../data/mat_files/square_mat.txt", "r+");
FILE *in3 = fopen("../data/mat_files/poly_mat.txt", "r+");
FILE *in4 = fopen("../data/mat_files/working_mat.txt", "r+");
FILE *out = fopen("../data/mat_files/solve_mat_result.txt","a+");
char c1, c2, c3, c4;
for (int i = 0; i < para_num; i++) {
for (int j = 0; j < poly_num; j++) {
linear_mat[i][j][0] = 0;
linear_mat[i][j][1] = 0;
square_mat[i][j] = 0;
for (int k = 0; k < 128; k++) {
fscanf(in1, "%c", &c1);
while (c1 != '0' && c1 != '1') {
fscanf(in1, "%c", &c1);
}
if (c1 == '1') {
linear_mat[i][j][k/64] ^= ((linearpart_t) 1 << (k-((int)k/64)*64));
}
}
for (int k = 0; k < para_num; k++) {
fscanf(in2, "%c", &c2);
while (c2 != '0' && c2 != '1') {
fscanf(in2, "%c", &c2);
}
if (c2 == '1') {
square_mat[i][j] ^= ((squarepart_t) 1 << (para_num - 1) - k);
}
}
}
}
for (int i = 0; i < ori_num; i++) {
for (int j = 0; j < para_num + unknown_num + 1; j++) {
polys[i][j][0] = 0;
polys[i][j][1] = 0;
polys[i][j][2] = 0;
for (int k = 0; k < 192; k++) {
fscanf(in3, "%c", &c3);
while (c3 != '0' && c3 != '1') {
fscanf(in3, "%c", &c3);
}
if (k < para_num && c3 == '1') {
polys[i][j][0] ^= ((oripoly_t) 1 << (para_num - k -1));
} else if (k >= para_num && k<para_num+unknown_num && c3 == '1') {
polys[i][j][1] ^= ((oripoly_t) 1 << (k - para_num));
}else if(c3 == '1'){
polys[i][j][2] ^= ((oripoly_t) 1);
}
}
}
}
for (int i = 0; i < poly_num; i++) {
working_mat[i][0] = 0;
working_mat[i][1] = 0;
for (int j = 0; j < 128; j++) {
fscanf(in4, "%c", &c4);
while (c4 != '0' && c4 != '1') {
fscanf(in4, "%c", &c4);
}
if (c4 == '1') {
working_mat[i][(int)j/64] ^= ((linearpart_t) 1 << (j - ((int)j/64)*64));
}
}
working_mat_file[i][0] = working_mat[i][0];
working_mat_file[i][1] = working_mat[i][1];
}
fclose(in1);
fclose(in2);
fclose(in3);
fclose(in4);
printf("finish reading file!\n");
linearpart_t linear_mat_enum[ENUM_NUM * POLY_NUM * 2];
squarepart_t square_mat_enum[ENUM_NUM * POLY_NUM];
oripoly_t polys_mat_enum[ori_num * (para_num + unknown_num + 1) * 3 ];
for (int i = 0; i < ENUM_NUM; i++) {
for (int j = 0; j < POLY_NUM; j++) {
for (int k = 0; k < 2; k++) {
linear_mat_enum[i * POLY_NUM * 2 + j * 2 + k] =
linear_mat[i][j][k];
}
}
}
for (int i = 0; i < ENUM_NUM; i++) {
for (int j = 0; j < POLY_NUM; j++) {
square_mat_enum[i * POLY_NUM + j] = square_mat[i][j];
}
}
for(int i = 0; i < ori_num; i++){
for(int j = 0; j < para_num + unknown_num + 1; j++){
for(int k = 0; k < 3; k++){
polys_mat_enum[i * (para_num + unknown_num + 1) * 3 + j * 3 + k] = polys[i][j][k];
}
}
}
cudaMemcpyToSymbol(d_linear_mat, linear_mat_enum,
2 * ENUM_NUM * POLY_NUM * sizeof(linearpart_t));
cudaMemcpyToSymbol(d_square_mat, square_mat_enum,
ENUM_NUM * POLY_NUM * sizeof(squarepart_t));
cudaMemcpyToSymbol(d_polys_mat, polys_mat_enum,
3 * NONLINEAR_NUM * (PARA_NUM + UNKNOWN_NUM + 1) * sizeof(oripoly_t));
printf("finish copying device memory!\n");
cudaError_t err = cudaSuccess;
int thidx = BLOCK_NUM * THREAD_NUM;
value_t *d_val = NULL;
err = cudaMalloc((void **) &d_val, thidx * sizeof(value_t));
if (err != cudaSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
char *d_bound = NULL;
err = cudaMalloc((void **) &d_bound, thidx * sizeof(char));
if (err != cudaSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t *d_sol_total = NULL;
err = cudaMalloc((void **) &d_sol_total, thidx * sizeof(value_t));
if (err != cudaSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
linearpart_t *d_working_mat_copy = NULL;
err = cudaMalloc((void **) &d_working_mat_copy,
thidx * poly_num * 2 * sizeof(linearpart_t));
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to allocate device working_mat_copy (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
squarepart_t *d_const_mat = NULL;
err = cudaMalloc((void **) &d_const_mat,
thidx * poly_num * sizeof(squarepart_t));
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to allocate devices const_mat (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy oripolys from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t h_result[thidx * RESULT_MAX_NUM * 2];
for(int i = 0; i < thidx * RESULT_MAX_NUM * 2 ; i ++){
h_result[i] = 0;
}
printf("finish allocate device memory!\n");
int s_total_p0 = 0;
value_t *val_arr = (value_t*)calloc(thidx, sizeof(value_t));
linearpart_t *working_mat_copy_arr = (linearpart_t*)calloc(thidx * POLY_NUM * 2, sizeof(linearpart_t));
squarepart_t *const_mat_arr = (squarepart_t*)calloc(thidx * POLY_NUM, sizeof(squarepart_t));
char *bound_arr = (char*)calloc(thidx, sizeof(short));
for (int thi = 0; thi < thidx; thi++) {
value_t sol_num = 0;
value_t solutions[SOL_MAX_NUM];
//int sol_total = 0;
value_t val = (set_val + (value_t) thi) << ENUM_NUM;
val_arr[thi] = val;
for (int pi = 0; pi < POLY_NUM; pi++) {
working_mat[pi][0] = working_mat_file[pi][0];
working_mat[pi][1] = working_mat_file[pi][1];
const_mat[pi] = 0;
}
for (int pos = enum_num; pos < para_num; pos++) {
if (val & ((value_t) 1 << pos)) {
for (int pi = 0; pi < poly_num; pi++) {
working_mat[pi][0] ^= linear_mat[pos][pi][0];
working_mat[pi][1] ^= linear_mat[pos][pi][1];
}
for (int pi = 0; pi < poly_num; pi++) {
const_mat[pi] ^= square_mat[pos][pi];
}
}
}
for (int i = 0; i < poly_num; i++) {
working_mat_copy[i][0] = working_mat[i][0];
working_mat_copy[i][1] = working_mat[i][1];
working_mat_copy_arr[thi * POLY_NUM * 2 + 2 * i] = working_mat_copy[i][0];
working_mat_copy_arr[thi * POLY_NUM * 2 + 2 * i + 1] = working_mat_copy[i][1];
const_mat_arr[thi * POLY_NUM + i] = const_mat[i];
}
for (int pi = 0; pi < poly_num; pi++) {
value_t w = const_mat[pi] & val;
w = (w) ^ (w >> 32);
w = (w) ^ (w >> 16);
w = (w) ^ (w >> 8);
w = (w) ^ (w >> 4);
w = (w) ^ (w >> 2);
w = (w) ^ (w >> 1);
if (w & (value_t) 1) {
working_mat[pi][0] ^= (linearpart_t) 1;
}
}
sol_num = gauss_host(working_mat, POLY_NUM, UNKNOWN_NUM, solutions);
s_total_p0 += sol_num;
//verify the solutions
for (int s = 0; s < sol_num; s++) {
int one_num = 0;
int zero_num = 0;
int op;
for (op = 0; op < ori_num; op++) {
cstpoly[0] = polys[op][para_num + unknown_num][0];
cstpoly[1] = polys[op][para_num + unknown_num][1];
cstpoly[2] = polys[op][para_num + unknown_num][2];
// for parameters.
for (int pa = 0; pa < para_num; pa++) {
if (val & ((value_t) 1 << pa)) {
cstpoly[0] ^= polys[op][para_num - pa - 1][0];
cstpoly[1] ^= polys[op][para_num - pa - 1][1];
cstpoly[2] ^= polys[op][para_num - pa - 1][2];
}
}
for (int un = 0; un < unknown_num; un++) {
if (solutions[s] & ((value_t) 1 << un)) {
cstpoly[0] ^= polys[op][para_num + un][0];
cstpoly[1] ^= polys[op][para_num + un][1];
cstpoly[2] ^= polys[op][para_num + un][2];
}
}
cstpoly[2] ^= polys[op][unknown_num + para_num][2];
// evaluate
cstpoly[0] = cstpoly[0] & val;
cstpoly[1] = cstpoly[1] & solutions[s];
cstpoly[2] = cstpoly[2] & ((oripoly_t) 1);
value_t w = cstpoly[0] ^ cstpoly[1] ^ cstpoly[2];
w = (w) ^ (w >> 32);
w = (w) ^ (w >> 16);
w = (w) ^ (w >> 8);
w = (w) ^ (w >> 4);
w = (w) ^ (w >> 2);
w = (w) ^ (w >> 1);
if (w & (value_t) 1) {
zero_num++;
if (zero_num > ori_num - bound) {
break;
}
} else {
one_num++;
}
}
if (zero_num + one_num == ori_num && one_num > bound) {
//fprintf(out, "n=%d,val=0x%016x,sol=0x%016x\n",one_num, val, solutions[s]);
h_result[thi * RESULT_MAX_NUM * 2] = val;
h_result[thi * RESULT_MAX_NUM * 2 + 1] = solutions[s];
fprintf(out,"n=%d,val=0x",one_num);
//fprintf(out,"val=0x%016x,sol=0x%016x\n",h_result[i * 2 * RESULT_MAX_NUM + j * 2],h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1]);
fprintf(out, "%08x", (unsigned int)(val >> 32));
fprintf(out, "%08x,", (unsigned int)(val & 0xFFFFFFFFULL));
fprintf(out, "sol=0x");
fprintf(out, "%08x", (unsigned int)(solutions[s] >> 32));
fprintf(out, "%08x\n", (unsigned int)(solutions[s] & 0xFFFFFFFFULL));
}
if (one_num > bound) {
bound = one_num;
bound_arr[thi] = bound;
}
}
}
for(int i = 0; i < thidx;i++){
if(bound_arr[i] < bound){
bound_arr[i] = bound;
h_result[i * RESULT_MAX_NUM * 2] = 0;
h_result[i * RESULT_MAX_NUM * 2 + 1] = 0;
}
}
// for(int i = 0; i < thidx;i++){
// printf("No. %d, bound:%d, val:%lu, sol:%lu\n",i,bound_arr[i],h_result[i * RESULT_MAX_NUM * 2],h_result[i * RESULT_MAX_NUM * 2+1]);
// }
printf("finish cpu computing! the bound is %d now...\n", bound);
//begin device part
err = cudaMemcpy(d_val, val_arr, thidx * sizeof(value_t),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("Failed to copy value from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_bound, bound_arr, thidx * sizeof(char),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("Failed to copy bound from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t *d_result = NULL;
err = cudaMalloc((void **) &d_result, thidx * RESULT_MAX_NUM * 2 * sizeof(value_t));
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to allocate devices result (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_result, h_result,
thidx * RESULT_MAX_NUM * 2 * sizeof(value_t),cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy result from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_working_mat_copy, working_mat_copy_arr,
thidx * 2 * poly_num * sizeof(linearpart_t), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy working_mat_copy from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_const_mat, const_mat_arr,
thidx * poly_num * sizeof(squarepart_t), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy const_mat from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("enum num : %d\nblock num : %d\nthread num : %d\n", ENUM_NUM,
BLOCK_NUM, THREAD_NUM);
cudaEvent_t start1;
cudaEventCreate(&start1);
cudaEvent_t stop1;
cudaEventCreate(&stop1);
cudaEventRecord(start1, NULL);
printf("begin solve linear system!\n");
solveLinear<<<BLOCK_NUM, THREAD_NUM>>>(d_working_mat_copy, d_const_mat,
d_val, d_bound, d_sol_total,d_result);
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch solveLinear kernel (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t h_sol_total[thidx];
err = cudaMemcpy(h_sol_total, d_sol_total, thidx * sizeof(value_t),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy total solution numbers from device to host (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_result, d_result, thidx * RESULT_MAX_NUM * 2 * sizeof(value_t),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy result from device to host (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(bound_arr, d_bound, thidx * sizeof(char),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy bound from device to host (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("\n------------ begin sort ---------------\n");
// for(int i = 0; i < thidx; i++){
// printf("bound:%d\n",bound_arr[i]);
// printf("1. val:%lu, sol:%lu\n", h_result[i * 10],h_result[i*10 + 1]);
// printf("2. val:%lu, sol:%lu\n", h_result[i * 10 + 2],h_result[i*10 + 3]);
// printf("3. val:%lu, sol:%lu\n", h_result[i * 10 + 4],h_result[i*10 + 5]);
// printf("4. val:%lu, sol:%lu\n", h_result[i * 10 + 6],h_result[i*10 + 7]);
// printf("5. val:%lu, sol:%lu\n", h_result[i * 10 + 8],h_result[i*10 + 9]);
//
// }
for(int i = 0; i < thidx; i++){
for(int j = i + 1; j < thidx; j++){
if(bound_arr[i] > bound_arr[j]){
char temp = bound_arr[i];
bound_arr[i] = bound_arr[j];
bound_arr[j] = temp;
for(int ri = 0;ri < 2 * RESULT_MAX_NUM; ri ++){
value_t temp = h_result[i * 2 * RESULT_MAX_NUM + ri];
h_result[i * 2 * RESULT_MAX_NUM + ri] = h_result[j * 2 * RESULT_MAX_NUM + ri];
h_result[j * 2 * RESULT_MAX_NUM + ri] = temp;
}
}
}
}
// printf("\n------------ finish sort ---------------\n");
printf("bound:%d\n",bound_arr[thidx-1]);
// printf("1. val:%lu, sol:%lu\n", h_result[i * 10],h_result[i*10 + 1]);
// printf("2. val:%lu, sol:%lu\n", h_result[i * 10 + 2],h_result[i*10 + 3]);
// printf("3. val:%lu, sol:%lu\n", h_result[i * 10 + 4],h_result[i*10 + 5]);
// printf("4. val:%lu, sol:%lu\n", h_result[i * 10 + 6],h_result[i*10 + 7]);
// printf("5. val:%lu, sol:%lu\n", h_result[i * 10 + 8],h_result[i*10 + 9]);
for(int i = thidx - 1;i >= 0;i--){
for(int j = 0,k = 0; j < RESULT_MAX_NUM; j++, k++){
if(h_result[i * 2 * RESULT_MAX_NUM + j * 2] != 0 && h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1]!= 0){
fprintf(out,"n=%d,val=0x",bound_arr[i]);
//fprintf(out,"val=0x%016x,sol=0x%016x\n",h_result[i * 2 * RESULT_MAX_NUM + j * 2],h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1]);
fprintf(out, "%08x", (unsigned int)(h_result[i * 2 * RESULT_MAX_NUM + j * 2] >> 32));
fprintf(out, "%08x,", (unsigned int)(h_result[i * 2 * RESULT_MAX_NUM + j * 2] & 0xFFFFFFFFULL));
fprintf(out, "sol=0x");
fprintf(out, "%08x", (unsigned int)(h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1] >> 32));
fprintf(out, "%08x\n", (unsigned int)(h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1] & 0xFFFFFFFFULL));
}
// fprintf(out,"2. val:%lu, sol:%lu\n", h_result[i * 2 * RESULT_MAX_NUM + 2],h_result[i*2 * RESULT_MAX_NUM + 3]);
}
if(bound_arr[i] > bound_arr[i-1]){
break;
}
}
printf("time:%.3lf ms\n---------------------------------------\n", msecTotal1);
cudaFree(d_working_mat_copy);
cudaFree(d_const_mat);
cudaFree(d_val);
cudaFree(d_bound);
cudaFree(d_sol_total);
cudaFree(d_result);
}
|
20826082c1b3d9348bbe7d1beb0aca7e43990c5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Kernel: reset every voxel of the TSDF volume to (tsdf = 0, weight = 0).
// Launched over a 2D grid covering the X/Y plane; each thread walks the
// full Z extent of its (x, y) column via the per-slice element stride.
template<typename T>
__global__ void
initializeVolume (PtrStep<T> volume)
{
  const int vx = blockIdx.x * blockDim.x + threadIdx.x;
  const int vy = blockIdx.y * blockDim.y + threadIdx.y;
  if (vx >= VOLUME_X || vy >= VOLUME_Y)
    return;
  T *cell = volume.ptr (vy) + vx;
  // distance (in elements) between consecutive Z slices of this column
  const int z_stride = VOLUME_Y * volume.step / sizeof (*cell);
#pragma unroll
  for (int vz = 0; vz < VOLUME_Z; ++vz, cell += z_stride)
    pack_tsdf (0.f, 0, *cell);
}
// Clears (resets to tsdf = 0, weight = 0) the slice of the cyclic TSDF
// buffer that was invalidated by shifting the volume origin.
// One thread per (x, y) column; minBounds/maxBounds delimit the shifted
// band. Columns whose wrapped X or Y index falls inside the band
// ("black zone") are cleared over the whole Z extent; the remaining
// columns ("red zone") are cleared only over |maxBounds.z| Z steps
// starting at minBounds.z. Because the volume lives in a circular memory
// region, the walking pointer is wrapped whenever it leaves
// [tsdf_memory_start, tsdf_memory_end].
template<typename T>
__global__ void
clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//compute relative indices
// Indices at or below the lower bound are wrapped to the far side of the
// cyclic buffer so the interval test below works across the wrap point.
int idX, idY;
if(x <= minBounds.x)
idX = x + buffer.voxels_size.x;
else
idX = x;
if(y <= minBounds.y)
idY = y + buffer.voxels_size.y;
else
idY = y;
if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y)
{
if( (idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y) )
{
// BLACK ZONE => clear on all Z values
///Pointer to the first x,y,0
T *pos = volume.ptr(y) + x;
///Get the step on Z
int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
///Get the size of the whole TSDF memory
// size is in elements of T (pointer arithmetic below is element-wise)
int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
///Move along z axis
#pragma unroll
for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step)
{
///If we went outside of the memory, make sure we go back to the beginning of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
pack_tsdf (0.f, 0, *pos);
}
}
else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
{
///RED ZONE => clear only appropriate Z
///Pointer to the first x,y,0
T *pos = volume.ptr(y) + x;
///Get the step on Z
int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
///Get the size of the whole TSDF memory
int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
///Move pointer to the Z origin
pos+= minBounds.z * z_step;
///If the Z offset is negative, we move the pointer back
// NOTE(review): maxBounds.z doubles as a signed shift amount here and as
// a step count (via abs) below — confirm against clearTSDFSlice.
if(maxBounds.z < 0)
pos += maxBounds.z * z_step;
///We make sure that we are not already before the start of the memory
if(pos < buffer.tsdf_memory_start)
pos = pos + size;
int nbSteps = abs(maxBounds.z);
#pragma unroll
for(int z = 0; z < nbSteps; ++z, pos+=z_step)
{
///If we went outside of the memory, make sure we go back to the beginning of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
pack_tsdf (0.f, 0, *pos);
}
} //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
} // if ( x < VOLUME_X && y < VOLUME_Y)
} // clearSliceKernel
// Host wrapper: zero-initializes the whole TSDF volume and blocks until
// the device has finished.
void
initVolume (PtrStep<short2> volume)
{
  const dim3 threads (16, 16);
  dim3 blocks (divUp (VOLUME_X, threads.x), divUp (VOLUME_Y, threads.y), 1);
  hipLaunchKernelGGL(( initializeVolume), dim3(blocks), dim3(threads), 0, 0, volume);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Functor that integrates one raw depth frame (millimeters) into the TSDF
// volume. One thread per (x, y) column of the voxel grid; each thread walks
// the whole Z extent and fuses a new sample into every voxel that projects
// into the current depth image.
struct Tsdf
{
enum
{
// Launch geometry this functor is written for (threads per block).
CTA_SIZE_X = 32, CTA_SIZE_Y = 8,
// Cap on the per-voxel accumulation weight (running-average window).
MAX_WEIGHT = 1 << 7
};
mutable PtrStep<short2> volume; // packed (tsdf, weight) voxel storage
float3 cell_size; // voxel edge lengths
Intr intr; // camera intrinsics
Mat33 Rcurr_inv; // inverse rotation of the current camera pose
float3 tcurr; // translation of the current camera pose
PtrStepSz<ushort> depth_raw; //depth in mm
float tranc_dist_mm; // truncation distance, millimeters
// Returns the global-frame coordinate of the center of voxel (x, y, z).
__device__ __forceinline__ float3
getVoxelGCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
// distance (in short2 elements) between consecutive Z slices
int elem_step = volume.step * VOLUME_Y / sizeof(*pos);
for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step)
{
float3 v_g = getVoxelGCoo (x, y, z); //3 // p
//transform to curr cam coo space
float3 v = Rcurr_inv * (v_g - tcurr); //4
int2 coo; //project to current cam
// NOTE: the projection divides by v.z before the v.z > 0 test below;
// results for voxels behind the camera are discarded by that guard.
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x];
if (Dp != 0)
{
// lambda_inv converts the voxel's Euclidean distance from the camera
// into a depth along the ray through pixel coo.
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm
// flip sign so sdf > 0 means the voxel is in front of the surface
sdf *= (-1);
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// weighted running average of samples; weight clamped at MAX_WEIGHT
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
}
}
};
// Thin launch shim: runs the Tsdf integration functor on the device.
__global__ void
integrateTsdfKernel (const Tsdf tsdf) {
  tsdf.operator () ();
}
// Alternative TSDF integration kernel (standalone version of the Tsdf
// functor, with the rigid transform expanded by hand and the Z-dependent
// terms incrementally updated per slice).
// NOTE(review): its only launch in this file is commented out inside
// integrateTsdfVolume, so this kernel appears unused — confirm before
// relying on it.
__global__ void
tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr,
const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
// distance (in short2 elements) between consecutive Z slices
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
// voxel (x, y, 0) center in global frame, relative to the camera center
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
// camera-space coordinates of the z = 0 voxel (rotated by Rcurr_inv)
float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z;
float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z;
float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z;
//#pragma unroll
for (int z = 0; z < VOLUME_Z; ++z)
{
// vr: voxel center relative to camera center, in the global frame
float3 vr;
vr.x = v_g_x;
vr.y = v_g_y;
vr.z = (v_g_z + z * cell_size.z);
// v: same point in camera coordinates (only the Z-dependent term changes)
float3 v;
v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z;
v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z;
v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z;
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm
if (Dp != 0)
{
// lambda_inv converts the voxel's Euclidean distance into a depth
// along the ray through pixel coo; sdf > 0 in front of the surface.
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// weighted running average; weight clamped at Tsdf::MAX_WEIGHT
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
pos += elem_step;
} /* for(int z = 0; z < VOLUME_Z; ++z) */
} /* __global__ */
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper for the Tsdf functor: fills in all integration parameters
// (converting the truncation distance from meters to millimeters) and
// nominally launches integrateTsdfKernel over the X/Y voxel plane.
// NOTE(review): the kernel launch is compiled out by `#if 0`, so as written
// this overload only builds the parameter struct and synchronizes without
// integrating anything — confirm whether this path is intentionally disabled.
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size,
const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist,
PtrStep<short2> volume)
{
Tsdf tsdf;
tsdf.volume = volume;
tsdf.cell_size.x = volume_size.x / VOLUME_X;
tsdf.cell_size.y = volume_size.y / VOLUME_Y;
tsdf.cell_size.z = volume_size.z / VOLUME_Z;
tsdf.intr = intr;
tsdf.Rcurr_inv = Rcurr_inv;
tsdf.tcurr = tcurr;
tsdf.depth_raw = depth_raw;
tsdf.tranc_dist_mm = tranc_dist*1000; //mm
dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
#if 0
//tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size);
hipLaunchKernelGGL(( integrateTsdfKernel), dim3(grid), dim3(block), 0, 0, tsdf);
#endif
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Converts a raw depth map (millimeters) into per-pixel distances along the
// camera ray, in meters: out = depth * ||(xl, yl, 1)|| / 1000, where
// (xl, yl) is the pixel's normalized image-plane coordinate. Pixels with
// zero depth yield zero. One thread per pixel.
__global__ void
scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr)
{
  const int u = blockIdx.x * blockDim.x + threadIdx.x;
  const int v = blockIdx.y * blockDim.y + threadIdx.y;
  if (u >= depth.cols || v >= depth.rows)
    return;
  const int Dp = depth.ptr (v)[u];
  const float rx = (u - intr.cx) / intr.fx;
  const float ry = (v - intr.cy) / intr.fy;
  const float ray_scale = sqrtf (rx * rx + ry * ry + 1);
  scaled.ptr (v)[u] = Dp * ray_scale / 1000.f; //meters
}
// Main TSDF integration kernel for the cyclic (rolling) buffer. Consumes
// the pre-scaled depth map (meters, from scaleDepth) and fuses one sample
// per voxel that projects into it. One thread per (x, y) column; all
// Z-dependent projection terms are updated incrementally in the loop
// header, and the voxel pointer is wrapped whenever it walks past the end
// of the circular TSDF memory region.
__global__ void
tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= buffer.voxels_size.x || y >= buffer.voxels_size.y)
return;
// voxel (x, y, 0) center in the global frame, relative to the camera center
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
// camera-space coordinates pre-multiplied by the focal lengths so the
// projection below reduces to a multiply-add per axis
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
// shift the pointer to relative indices
shift_tsdf_pointer(&pos, buffer);
// distance (in short2 elements) between consecutive Z slices
int elem_step = volume.step * buffer.voxels_size.y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < buffer.voxels_size.z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
// As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory
if(pos > buffer.tsdf_memory_end)
pos -= (buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1);
// inv_z < 0 means the voxel is behind the camera; skip it
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
// sdf = measured ray length minus voxel distance; > 0 in front of surface
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// weighted running average; weight clamped at Tsdf::MAX_WEIGHT
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
// Experimental TSDF integration variant ("normal hack"): before fusing a
// sample it estimates the local surface normal by central differences on
// the stored TSDF field and skips integration when the viewing direction
// is too oblique to the estimated surface (cosine < 0.5, i.e. angle above
// 60 degrees). One thread per (x, y) column; each thread walks the whole
// Z extent. Operates on the plain (non-rolling) volume layout.
__global__ void
tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
                   const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;
  if (x >= VOLUME_X || y >= VOLUME_Y)
    return;
  // voxel (x, y, 0) center in the global frame, relative to the camera center
  const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
  const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
  float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
  float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
  // camera-space coordinates pre-multiplied by the focal lengths so the
  // projection below reduces to a multiply-add per axis
  float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
  float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
  float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
  float z_scaled = 0;
  float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
  float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
  float tranc_dist_inv = 1.0f / tranc_dist;
  short2* pos = volume.ptr (y) + x;
  // distance (in short2 elements) between consecutive Z slices
  int elem_step = volume.step * VOLUME_Y / sizeof(short2);
  //#pragma unroll
  for (int z = 0; z < VOLUME_Z;
       ++z,
       v_g_z += cell_size.z,
       z_scaled += cell_size.z,
       v_x += Rcurr_inv_0_z_scaled,
       v_y += Rcurr_inv_1_z_scaled,
       pos += elem_step)
  {
    // inv_z < 0 means the voxel is behind the camera; skip it
    float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
    if (inv_z < 0)
      continue;
    // project to current cam
    int2 coo =
    {
      __float2int_rn (v_x * inv_z + intr.cx),
      __float2int_rn (v_y * inv_z + intr.cy)
    };
    if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
    {
      float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
      // sdf = measured ray length minus voxel distance; > 0 in front of surface
      float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
      if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
      {
        float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
        bool integrate = true;
        // Normal estimation needs a full 6-neighborhood; skip border voxels.
        if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2))
        {
          const float qnan = numeric_limits<float>::quiet_NaN();
          float3 normal = make_float3(qnan, qnan, qnan);
          // Central differences along each axis; a component stays NaN when
          // either neighbor's weight is too low (<= 16) to be trusted.
          float Fn, Fp;
          int Wn = 0, Wp = 0;
          unpack_tsdf (*(pos + elem_step), Fn, Wn);
          unpack_tsdf (*(pos - elem_step), Fp, Wp);
          if (Wn > 16 && Wp > 16)
            normal.z = (Fn - Fp)/cell_size.z;
          unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn);
          unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp);
          if (Wn > 16 && Wp > 16)
            normal.y = (Fn - Fp)/cell_size.y;
          unpack_tsdf (*(pos + 1), Fn, Wn);
          unpack_tsdf (*(pos - 1), Fp, Wp);
          if (Wn > 16 && Wp > 16)
            normal.x = (Fn - Fp)/cell_size.x;
          // BUGFIX: the original guard `normal.x != qnan && ...` is always
          // true under IEEE-754 (any `!=` comparison involving NaN yields
          // true), so the validity check never rejected invalid normals and
          // correctness relied on NaN propagating into norm2 below. Express
          // the intended "all three components are valid" test with isnan().
          if (!isnan (normal.x) && !isnan (normal.y) && !isnan (normal.z))
          {
            float norm2 = dot(normal, normal);
            if (norm2 >= 1e-10)
            {
              normal *= rsqrt(norm2);
              // cosine of the angle between the camera->voxel direction and
              // the estimated surface normal
              float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z;
              float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z);
              if (cosine < 0.5)
                integrate = false;
            }
          }
        }
        if (integrate)
        {
          //read and unpack
          float tsdf_prev;
          int weight_prev;
          unpack_tsdf (*pos, tsdf_prev, weight_prev);
          const int Wrk = 1;
          // weighted running average; weight clamped at Tsdf::MAX_WEIGHT
          float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
          int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
          pack_tsdf (tsdf_new, weight_new, *pos);
        }
      }
    }
  } // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Rolling-buffer integration entry point: scales the raw depth map
// (millimeters -> meters along each ray), then launches tsdf23 to fuse the
// frame into the cyclic TSDF volume. Blocks until the device is done.
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr,
                     const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr,
                     float tranc_dist,
                     PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled)
{
  depthScaled.create (depth.rows, depth.cols);
  //scales depth along ray and converts mm -> meters.
  const dim3 scale_threads (32, 8);
  const dim3 scale_blocks (divUp (depth.cols, scale_threads.x), divUp (depth.rows, scale_threads.y));
  hipLaunchKernelGGL(( scaleDepth), dim3(scale_blocks), dim3(scale_threads), 0, 0, depth, depthScaled, intr);
  cudaSafeCall ( hipGetLastError () );
  // voxel edge lengths for the current buffer resolution
  float3 cell_size;
  cell_size.x = volume_size.x / buffer->voxels_size.x;
  cell_size.y = volume_size.y / buffer->voxels_size.y;
  cell_size.z = volume_size.z / buffer->voxels_size.z;
  //dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
  const dim3 threads (16, 16);
  const dim3 blocks (divUp (buffer->voxels_size.x, threads.x), divUp (buffer->voxels_size.y, threads.y));
  hipLaunchKernelGGL(( tsdf23), dim3(blocks), dim3(threads), 0, 0, depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer);
  //tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Computes, in grid coordinates, the band of voxels invalidated by moving
// the rolling-buffer origin by (shiftX, shiftY, shiftZ) and launches
// clearSliceKernel to reset them to (tsdf = 0, weight = 0).
// For X and Y a [min, max] interval is derived from the old origin and the
// shifted one, wrapping by voxels_size when the new origin is negative.
// For Z, minBounds.z carries the current origin and maxBounds.z carries the
// raw shift; the kernel consumes the shift as a signed offset and as a step
// count via abs().
void
clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ)
{
int newX = buffer->origin_GRID.x + shiftX;
int newY = buffer->origin_GRID.y + shiftY;
int3 minBounds, maxBounds;
//X
if(newX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = newX;
}
else
{
// wrapped: clear from the (wrapped) new origin up to the old origin
minBounds.x = newX + buffer->voxels_size.x;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x;
}
// NOTE(review): maxBounds.x/y may exceed voxels_size here; the kernel
// compares against wrapped indices (idX/idY) and appears to rely on that —
// confirm before changing the bound arithmetic.
if(minBounds.x > maxBounds.x)
std::swap(minBounds.x, maxBounds.x);
//Y
if(newY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = newY;
}
else
{
minBounds.y = newY + buffer->voxels_size.y;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y;
}
if(minBounds.y > maxBounds.y)
std::swap(minBounds.y, maxBounds.y);
//Z
minBounds.z = buffer->origin_GRID.z;
maxBounds.z = shiftZ;
// call kernel
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (buffer->voxels_size.x, block.x);
grid.y = divUp (buffer->voxels_size.y, block.y);
hipLaunchKernelGGL(( clearSliceKernel), dim3(grid), dim3(block), 0, 0, volume, *buffer, minBounds, maxBounds);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
| 20826082c1b3d9348bbe7d1beb0aca7e43990c5f.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Kernel: reset every voxel of the TSDF volume to (tsdf = 0, weight = 0).
// Launched over a 2D grid covering the X/Y plane; each thread walks the
// full Z extent of its (x, y) column via the per-slice element stride.
template<typename T>
__global__ void
initializeVolume (PtrStep<T> volume)
{
  const int vx = blockIdx.x * blockDim.x + threadIdx.x;
  const int vy = blockIdx.y * blockDim.y + threadIdx.y;
  if (vx >= VOLUME_X || vy >= VOLUME_Y)
    return;
  T *cell = volume.ptr (vy) + vx;
  // distance (in elements) between consecutive Z slices of this column
  const int z_stride = VOLUME_Y * volume.step / sizeof (*cell);
#pragma unroll
  for (int vz = 0; vz < VOLUME_Z; ++vz, cell += z_stride)
    pack_tsdf (0.f, 0, *cell);
}
// Clears (resets to tsdf = 0, weight = 0) the slice of the cyclic TSDF
// buffer that was invalidated by shifting the volume origin.
// One thread per (x, y) column; minBounds/maxBounds delimit the shifted
// band. Columns whose wrapped X or Y index falls inside the band
// ("black zone") are cleared over the whole Z extent; the remaining
// columns ("red zone") are cleared only over |maxBounds.z| Z steps
// starting at minBounds.z. Because the volume lives in a circular memory
// region, the walking pointer is wrapped whenever it leaves
// [tsdf_memory_start, tsdf_memory_end].
template<typename T>
__global__ void
clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//compute relative indices
// Indices at or below the lower bound are wrapped to the far side of the
// cyclic buffer so the interval test below works across the wrap point.
int idX, idY;
if(x <= minBounds.x)
idX = x + buffer.voxels_size.x;
else
idX = x;
if(y <= minBounds.y)
idY = y + buffer.voxels_size.y;
else
idY = y;
if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y)
{
if( (idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y) )
{
// BLACK ZONE => clear on all Z values
///Pointer to the first x,y,0
T *pos = volume.ptr(y) + x;
///Get the step on Z
int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
///Get the size of the whole TSDF memory
// size is in elements of T (pointer arithmetic below is element-wise)
int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
///Move along z axis
#pragma unroll
for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step)
{
///If we went outside of the memory, make sure we go back to the beginning of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
pack_tsdf (0.f, 0, *pos);
}
}
else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
{
///RED ZONE => clear only appropriate Z
///Pointer to the first x,y,0
T *pos = volume.ptr(y) + x;
///Get the step on Z
int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
///Get the size of the whole TSDF memory
int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
///Move pointer to the Z origin
pos+= minBounds.z * z_step;
///If the Z offset is negative, we move the pointer back
// NOTE(review): maxBounds.z doubles as a signed shift amount here and as
// a step count (via abs) below — confirm against clearTSDFSlice.
if(maxBounds.z < 0)
pos += maxBounds.z * z_step;
///We make sure that we are not already before the start of the memory
if(pos < buffer.tsdf_memory_start)
pos = pos + size;
int nbSteps = abs(maxBounds.z);
#pragma unroll
for(int z = 0; z < nbSteps; ++z, pos+=z_step)
{
///If we went outside of the memory, make sure we go back to the beginning of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
pack_tsdf (0.f, 0, *pos);
}
} //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
} // if ( x < VOLUME_X && y < VOLUME_Y)
} // clearSliceKernel
void
initVolume (PtrStep<short2> volume)
{
  // Cover the x/y extent of the volume with 16x16 thread tiles; the kernel
  // itself iterates over z.
  const dim3 block (16, 16);
  const dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y), 1);

  initializeVolume<<<grid, block>>>(volume);
  cudaSafeCall (cudaGetLastError ());
  cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
/** \brief Functor that fuses one raw depth map (in mm) into the TSDF volume.
  * Launched via integrateTsdfKernel with one thread per (x, y) column;
  * each thread walks the z axis and updates every voxel of its column with a
  * running weighted average of the truncated signed distance. */
struct Tsdf
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 8,
MAX_WEIGHT = 1 << 7
};
// Output volume of packed (tsdf, weight) pairs; mutable so the const
// operator() can write through it.
mutable PtrStep<short2> volume;
// Metric size of one voxel along each axis.
float3 cell_size;
// Camera intrinsics of the current frame.
Intr intr;
// Inverse rotation (world -> camera) and camera translation of the frame.
Mat33 Rcurr_inv;
float3 tcurr;
PtrStepSz<ushort> depth_raw; //depth in mm
// Truncation distance in millimetres.
float tranc_dist_mm;
/** \brief Returns the global (world) coordinates of the centre of voxel (x, y, z). */
__device__ __forceinline__ float3
getVoxelGCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
/** \brief Integrates the depth map into the column owned by this thread. */
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
// Element distance between consecutive z slices of the same column.
int elem_step = volume.step * VOLUME_Y / sizeof(*pos);
for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step)
{
float3 v_g = getVoxelGCoo (x, y, z); //3 // p
//transform to curr cam coo space
float3 v = Rcurr_inv * (v_g - tcurr); //4
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x];
if (Dp != 0)
{
// Ray direction through the pixel; lambda_inv converts the
// camera-to-voxel distance into a depth along the optical axis.
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm
// Flip sign so sdf is positive in front of the surface.
sdf *= (-1);
if (sdf >= -tranc_dist_mm)
{
// Truncate to [-1, 1] range (only the upper bound needs clamping here).
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
// Running weighted average with per-frame weight Wrk = 1,
// weight saturated at MAX_WEIGHT.
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
}
}
};
// Thin launch wrapper: all integration logic lives in Tsdf::operator()().
__global__ void
integrateTsdfKernel (const Tsdf tsdf) {
tsdf ();
}
/** \brief Alternative TSDF integration kernel (currently only referenced from
  * commented-out code). Same fusion rule as Tsdf::operator()(), but the
  * world->camera transform of the voxel column is hoisted out of the z loop
  * and only the z-dependent terms are added per iteration.
  * One thread per (x, y) column; depth is in mm. */
__global__ void
tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr,
const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
// Element distance between consecutive z slices of the same column.
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
// World coordinates of the z = 0 voxel centre, relative to the camera.
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
// Camera-space coordinates of that first voxel (z terms added in the loop).
float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z;
float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z;
float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z;
//#pragma unroll
for (int z = 0; z < VOLUME_Z; ++z)
{
// vr: camera-to-voxel vector in world frame; v: voxel in camera frame.
float3 vr;
vr.x = v_g_x;
vr.y = v_g_y;
vr.z = (v_g_z + z * cell_size.z);
float3 v;
v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z;
v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z;
v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z;
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm
if (Dp != 0)
{
// Convert voxel distance to a depth along the pixel's viewing ray.
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
// Running weighted average, weight saturated at MAX_WEIGHT.
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
pos += elem_step;
} /* for(int z = 0; z < VOLUME_Z; ++z) */
} /* __global__ */
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Legacy TSDF integration entry point (non-rolling-buffer overload).
  * Fills a Tsdf functor with the frame data and would launch it over the
  * whole volume.
  * NOTE(review): the kernel launch is compiled out by '#if 0', so this
  * overload currently performs no integration at all — confirm it is
  * intentionally disabled in favour of the buffer-based overload. */
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size,
const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist,
PtrStep<short2> volume)
{
Tsdf tsdf;
tsdf.volume = volume;
// Metric size of one voxel along each axis.
tsdf.cell_size.x = volume_size.x / VOLUME_X;
tsdf.cell_size.y = volume_size.y / VOLUME_Y;
tsdf.cell_size.z = volume_size.z / VOLUME_Z;
tsdf.intr = intr;
tsdf.Rcurr_inv = Rcurr_inv;
tsdf.tcurr = tcurr;
tsdf.depth_raw = depth_raw;
tsdf.tranc_dist_mm = tranc_dist*1000; //mm
dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
#if 0
//tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size);
integrateTsdfKernel<<<grid, block>>>(tsdf);
#endif
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
__global__ void
scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr)
{
  // One thread per depth pixel: convert the raw z depth (mm) into the metric
  // distance along the viewing ray through that pixel.
  const int u = blockIdx.x * blockDim.x + threadIdx.x;
  const int v = blockIdx.y * blockDim.y + threadIdx.y;
  if (u >= depth.cols || v >= depth.rows)
    return;

  const int Dp = depth.ptr (v)[u];
  // Un-normalized ray direction through pixel (u, v): (rx, ry, 1).
  const float rx = (u - intr.cx) / intr.fx;
  const float ry = (v - intr.cy) / intr.fy;
  const float ray_norm = sqrtf (rx * rx + ry * ry + 1);
  scaled.ptr (v)[u] = Dp * ray_norm / 1000.f; //meters
}
/** \brief Main TSDF integration kernel for the rolling buffer.
  * Fuses a pre-scaled depth map (metric distances along rays, see scaleDepth)
  * into the circular TSDF volume. One thread per (x, y) column of the buffer;
  * the world->camera transform of the column is precomputed and only the
  * z-dependent increments are added per loop iteration. */
__global__ void
tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= buffer.voxels_size.x || y >= buffer.voxels_size.y)
return;
// Camera-to-voxel vector in the world frame for the z = 0 voxel centre.
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
// Squared x/y part of the camera-to-voxel distance (constant along z).
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
// Camera-space coordinates pre-multiplied by the focal lengths so the
// projection below is just a multiply-add with inv_z.
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
// Per-z-step increments of the projected coordinates.
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
// shift the pointer to relative indices
shift_tsdf_pointer(&pos, buffer);
// Element distance between consecutive z slices of the same column.
int elem_step = volume.step * buffer.voxels_size.y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < buffer.voxels_size.z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
// As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory
if(pos > buffer.tsdf_memory_end)
pos -= (buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1);
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
// Skip voxels behind the camera.
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
// Signed distance = measured ray length minus camera-to-voxel distance.
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
// Running weighted average, weight saturated at MAX_WEIGHT.
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
/** \brief Experimental variant of tsdf23 (currently not launched; see the
  * commented-out call in integrateTsdfVolume): before fusing a sample it
  * estimates the local surface normal by central differences on the TSDF and
  * skips the update when the normal makes a grazing angle (< 60 deg cosine
  * threshold 0.5) with the viewing direction. Operates on the full fixed-size
  * volume, not the rolling buffer. */
__global__ void
tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
// Camera-to-voxel vector in the world frame for the z = 0 voxel centre.
const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
// Camera-space coordinates pre-multiplied by the focal lengths.
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
bool integrate = true;
// Normal test only for interior voxels where all 6 neighbours exist.
if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2))
{
const float qnan = numeric_limits<float>::quiet_NaN();
float3 normal = make_float3(qnan, qnan, qnan);
// Central differences of the TSDF along z, y, x; a component is only
// trusted when both neighbours have weight > 16.
float Fn, Fp;
int Wn = 0, Wp = 0;
unpack_tsdf (*(pos + elem_step), Fn, Wn);
unpack_tsdf (*(pos - elem_step), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.z = (Fn - Fp)/cell_size.z;
unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn);
unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.y = (Fn - Fp)/cell_size.y;
unpack_tsdf (*(pos + 1), Fn, Wn);
unpack_tsdf (*(pos - 1), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.x = (Fn - Fp)/cell_size.x;
// NOTE(review): '!= qnan' is always true (NaN compares unequal to
// everything, including itself), so this gate never filters NaN
// components; the code still behaves as intended only because a NaN
// component makes norm2 NaN, which fails 'norm2 >= 1e-10' below.
// An isnan()-based check would make the intent explicit.
if (normal.x != qnan && normal.y != qnan && normal.z != qnan)
{
float norm2 = dot(normal, normal);
if (norm2 >= 1e-10)
{
normal *= rsqrt(norm2);
// Cosine of the angle between the view ray and the surface normal;
// reject grazing observations.
float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z;
float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z);
if (cosine < 0.5)
integrate = false;
}
}
}
if (integrate)
{
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Active TSDF integration entry point: scales the raw depth map and
  * fuses it into the rolling TSDF buffer via tsdf23. */
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr,
const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr,
float tranc_dist,
PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled)
{
  // Step 1: turn raw mm depth into metric distances along each viewing ray.
  depthScaled.create (depth.rows, depth.cols);
  const dim3 scale_block (32, 8);
  const dim3 scale_grid (divUp (depth.cols, scale_block.x), divUp (depth.rows, scale_block.y));
  scaleDepth<<<scale_grid, scale_block>>>(depth, depthScaled, intr);
  cudaSafeCall (cudaGetLastError ());

  // Step 2: fuse the scaled depth into the rolling buffer.
  const float3 cell_size = make_float3 (volume_size.x / buffer->voxels_size.x,
                                        volume_size.y / buffer->voxels_size.y,
                                        volume_size.z / buffer->voxels_size.z);
  const dim3 block (16, 16);
  const dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y));
  tsdf23<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer);
  cudaSafeCall (cudaGetLastError ());
  cudaSafeCall (cudaDeviceSynchronize ());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Host entry point that clears the slice of the rolling TSDF buffer
  * freed by shifting the volume origin by (shiftX, shiftY, shiftZ) voxels.
  * Computes the x/y bounds of the recycled slice in rolling-buffer coordinates
  * (wrapping negative positions past the buffer size) and launches
  * clearSliceKernel over the whole x/y extent; the z extent to clear is passed
  * through maxBounds.z. */
void
clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ)
{
int newX = buffer->origin_GRID.x + shiftX;
int newY = buffer->origin_GRID.y + shiftY;
int3 minBounds, maxBounds;
//X
// Positive shift: clear [old origin, new origin]; negative shift: the
// recycled slice wraps, so shift the bounds by the buffer size.
if(newX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = newX;
}
else
{
minBounds.x = newX + buffer->voxels_size.x;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x;
}
if(minBounds.x > maxBounds.x)
std::swap(minBounds.x, maxBounds.x);
//Y
if(newY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = newY;
}
else
{
minBounds.y = newY + buffer->voxels_size.y;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y;
}
if(minBounds.y > maxBounds.y)
std::swap(minBounds.y, maxBounds.y);
//Z
// Along z the kernel clears |shiftZ| slices starting at the current origin.
minBounds.z = buffer->origin_GRID.z;
maxBounds.z = shiftZ;
// call kernel
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (buffer->voxels_size.x, block.x);
grid.y = divUp (buffer->voxels_size.y, block.y);
clearSliceKernel<<<grid, block>>>(volume, *buffer, minBounds, maxBounds);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
|
d5fca630d836b4f09d1c1e91a09ea6182c1db873.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_blockMatching_gpu.cu
*
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright 2009 UCL - CMIC. All rights reserved.
*
*/
#ifndef _REG_BLOCKMATCHING_GPU_CU
#define _REG_BLOCKMATCHING_GPU_CU
#include "_reg_blockMatching_gpu.h"
#include "_reg_blockMatching_kernels.cu"
#include <fstream>
/** \brief Runs the block-matching step of NiftyReg on the GPU (HIP build).
  * Uploads the block/image geometry and sform matrix to constant memory,
  * binds the image and active-block textures, then launches the target- and
  * result-block kernels. targetPosition_d / resultPosition_d receive the
  * matched block centre positions.
  * Fix: the 1D grid-size limit is 65535 (CUDA/HIP gridDim limit), not 65335 —
  * the old mismatch between the cap and the /65535 split could leave a few
  * blocks unprocessed once the grid had to be folded into 2D. */
void block_matching_method_gpu(nifti_image *targetImage,
nifti_image *resultImage,
_reg_blockMatchingParam *params,
float **targetImageArray_d,
float **resultImageArray_d,
float **targetPosition_d,
float **resultPosition_d,
int **activeBlock_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
if(targetImage->nvox!=resultImage->nvox){
reg_print_fct_error("block_matching_method_gpu");
reg_print_msg_error("Target and warped images are expected to have the same size");
reg_exit(1);
}
// Copy some required parameters over to the device
int3 bDim =make_int3(params->blockNumber[0], params->blockNumber[1], params->blockNumber[2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_BlockDim, &bDim, sizeof(int3)));
// Image size
int3 image_size= make_int3(targetImage->nx, targetImage->ny, targetImage->nz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ImageSize, &image_size, sizeof(int3)));
// Texture binding
const int numBlocks = bDim.x*bDim.y*bDim.z;
NR_CUDA_SAFE_CALL(hipBindTexture(0, targetImageArray_texture, *targetImageArray_d, targetImage->nvox*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, resultImageArray_texture, *resultImageArray_d, targetImage->nvox*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, activeBlock_texture, *activeBlock_d, numBlocks*sizeof(int)));
// Copy the voxel-to-world (sform, or qform as fallback) matrix onto the device memory
mat44 *xyz_mat;
if(targetImage->sform_code>0)
xyz_mat=&(targetImage->sto_xyz);
else xyz_mat=&(targetImage->qto_xyz);
float4 t_m_a_h = make_float4(xyz_mat->m[0][0],xyz_mat->m[0][1],xyz_mat->m[0][2],xyz_mat->m[0][3]);
float4 t_m_b_h = make_float4(xyz_mat->m[1][0],xyz_mat->m[1][1],xyz_mat->m[1][2],xyz_mat->m[1][3]);
float4 t_m_c_h = make_float4(xyz_mat->m[2][0],xyz_mat->m[2][1],xyz_mat->m[2][2],xyz_mat->m[2][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(t_m_a, &t_m_a_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(t_m_b, &t_m_b_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(t_m_c, &t_m_c_h,sizeof(float4)));
// We need to allocate some memory to keep track of overlap areas and values for blocks
unsigned memSize = BLOCK_SIZE * params->activeBlockNumber;
float * targetValues;NR_CUDA_SAFE_CALL(hipMalloc(&targetValues, memSize * sizeof(float)));
memSize = BLOCK_SIZE * params->activeBlockNumber;
float * resultValues;NR_CUDA_SAFE_CALL(hipMalloc(&resultValues, memSize * sizeof(float)));
unsigned int Grid_block_matching = (unsigned int)ceil((float)params->activeBlockNumber/(float)NR_BLOCK->Block_target_block);
unsigned int Grid_block_matching_2 = 1;
// Fold the grid into 2D once the 65535 one-dimensional limit is hit
if (Grid_block_matching > 65535) {
Grid_block_matching_2 = (unsigned int)ceil((float)Grid_block_matching/65535.0f);
Grid_block_matching = 65535;
}
dim3 B1(NR_BLOCK->Block_target_block,1,1);
dim3 G1(Grid_block_matching,Grid_block_matching_2,1);
// process the target blocks
hipLaunchKernelGGL(( process_target_blocks_gpu), dim3(G1), dim3(B1), 0, 0, *targetPosition_d,
targetValues);
NR_CUDA_SAFE_CALL(hipDeviceSynchronize());
#ifndef NDEBUG
printf("[NiftyReg CUDA DEBUG] process_target_blocks_gpu kernel: %s - Grid size [%i %i %i] - Block size [%i %i %i]\n",
hipGetErrorString(hipGetLastError()),G1.x,G1.y,G1.z,B1.x,B1.y,B1.z);
#endif
unsigned int Result_block_matching = params->activeBlockNumber;
unsigned int Result_block_matching_2 = 1;
// Fold the grid into 2D once the 65535 one-dimensional limit is hit
if (Result_block_matching > 65535) {
Result_block_matching_2 = (unsigned int)ceil((float)Result_block_matching/65535.0f);
Result_block_matching = 65535;
}
dim3 B2(NR_BLOCK->Block_result_block,1,1);
dim3 G2(Result_block_matching,Result_block_matching_2,1);
hipLaunchKernelGGL(( process_result_blocks_gpu), dim3(G2), dim3(B2), 0, 0, *resultPosition_d, targetValues);
NR_CUDA_SAFE_CALL(hipDeviceSynchronize());
#ifndef NDEBUG
printf("[NiftyReg CUDA DEBUG] process_result_blocks_gpu kernel: %s - Grid size [%i %i %i] - Block size [%i %i %i]\n",
hipGetErrorString(hipGetLastError()),G2.x,G2.y,G2.z,B2.x,B2.y,B2.z);
#endif
NR_CUDA_SAFE_CALL(hipUnbindTexture(targetImageArray_texture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(resultImageArray_texture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(activeBlock_texture));
NR_CUDA_SAFE_CALL(hipFree(targetValues));
NR_CUDA_SAFE_CALL(hipFree(resultValues));
}
/** \brief Estimates the transformation from the matched block positions.
  * The optimisation itself is cheap, so the block positions are copied back
  * to the host and the existing CPU implementation is reused. */
void optimize_gpu( _reg_blockMatchingParam *blockMatchingParams,
                   mat44 *updateAffineMatrix,
                   float **targetPosition_d,
                   float **resultPosition_d,
                   bool affine)
{
    // Each active block contributes one 3D position.
    const int bytesToCopy = blockMatchingParams->activeBlockNumber * 3 * sizeof(float);
    NR_CUDA_SAFE_CALL(hipMemcpy(blockMatchingParams->targetPosition, *targetPosition_d, bytesToCopy, hipMemcpyDeviceToHost));
    NR_CUDA_SAFE_CALL(hipMemcpy(blockMatchingParams->resultPosition, *resultPosition_d, bytesToCopy, hipMemcpyDeviceToHost));
    // Delegate the actual optimisation to the CPU code path.
    optimize(blockMatchingParams, updateAffineMatrix, affine);
}
#endif
| d5fca630d836b4f09d1c1e91a09ea6182c1db873.cu | /*
* _reg_blockMatching_gpu.cu
*
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright 2009 UCL - CMIC. All rights reserved.
*
*/
#ifndef _REG_BLOCKMATCHING_GPU_CU
#define _REG_BLOCKMATCHING_GPU_CU
#include "_reg_blockMatching_gpu.h"
#include "_reg_blockMatching_kernels.cu"
#include <fstream>
/** \brief Runs the block-matching step of NiftyReg on the GPU (CUDA build).
  * Uploads the block/image geometry and sform matrix to constant memory,
  * binds the image and active-block textures, then launches the target- and
  * result-block kernels. targetPosition_d / resultPosition_d receive the
  * matched block centre positions.
  * Fixes: (1) the 1D grid-size limit is 65535, not 65335 — the old mismatch
  * between the cap and the /65535 split could leave a few blocks unprocessed
  * once the grid had to be folded into 2D; (2) cudaThreadSynchronize is
  * deprecated (removed in CUDA 12) — use cudaDeviceSynchronize. */
void block_matching_method_gpu(nifti_image *targetImage,
nifti_image *resultImage,
_reg_blockMatchingParam *params,
float **targetImageArray_d,
float **resultImageArray_d,
float **targetPosition_d,
float **resultPosition_d,
int **activeBlock_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
if(targetImage->nvox!=resultImage->nvox){
reg_print_fct_error("block_matching_method_gpu");
reg_print_msg_error("Target and warped images are expected to have the same size");
reg_exit(1);
}
// Copy some required parameters over to the device
int3 bDim =make_int3(params->blockNumber[0], params->blockNumber[1], params->blockNumber[2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_BlockDim, &bDim, sizeof(int3)));
// Image size
int3 image_size= make_int3(targetImage->nx, targetImage->ny, targetImage->nz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageSize, &image_size, sizeof(int3)));
// Texture binding
const int numBlocks = bDim.x*bDim.y*bDim.z;
NR_CUDA_SAFE_CALL(cudaBindTexture(0, targetImageArray_texture, *targetImageArray_d, targetImage->nvox*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, resultImageArray_texture, *resultImageArray_d, targetImage->nvox*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, activeBlock_texture, *activeBlock_d, numBlocks*sizeof(int)));
// Copy the voxel-to-world (sform, or qform as fallback) matrix onto the device memory
mat44 *xyz_mat;
if(targetImage->sform_code>0)
xyz_mat=&(targetImage->sto_xyz);
else xyz_mat=&(targetImage->qto_xyz);
float4 t_m_a_h = make_float4(xyz_mat->m[0][0],xyz_mat->m[0][1],xyz_mat->m[0][2],xyz_mat->m[0][3]);
float4 t_m_b_h = make_float4(xyz_mat->m[1][0],xyz_mat->m[1][1],xyz_mat->m[1][2],xyz_mat->m[1][3]);
float4 t_m_c_h = make_float4(xyz_mat->m[2][0],xyz_mat->m[2][1],xyz_mat->m[2][2],xyz_mat->m[2][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(t_m_a, &t_m_a_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(t_m_b, &t_m_b_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(t_m_c, &t_m_c_h,sizeof(float4)));
// We need to allocate some memory to keep track of overlap areas and values for blocks
unsigned memSize = BLOCK_SIZE * params->activeBlockNumber;
float * targetValues;NR_CUDA_SAFE_CALL(cudaMalloc(&targetValues, memSize * sizeof(float)));
memSize = BLOCK_SIZE * params->activeBlockNumber;
float * resultValues;NR_CUDA_SAFE_CALL(cudaMalloc(&resultValues, memSize * sizeof(float)));
unsigned int Grid_block_matching = (unsigned int)ceil((float)params->activeBlockNumber/(float)NR_BLOCK->Block_target_block);
unsigned int Grid_block_matching_2 = 1;
// Fold the grid into 2D once the 65535 one-dimensional limit is hit
if (Grid_block_matching > 65535) {
Grid_block_matching_2 = (unsigned int)ceil((float)Grid_block_matching/65535.0f);
Grid_block_matching = 65535;
}
dim3 B1(NR_BLOCK->Block_target_block,1,1);
dim3 G1(Grid_block_matching,Grid_block_matching_2,1);
// process the target blocks
process_target_blocks_gpu<<<G1, B1>>>(*targetPosition_d,
targetValues);
NR_CUDA_SAFE_CALL(cudaDeviceSynchronize());
#ifndef NDEBUG
printf("[NiftyReg CUDA DEBUG] process_target_blocks_gpu kernel: %s - Grid size [%i %i %i] - Block size [%i %i %i]\n",
cudaGetErrorString(cudaGetLastError()),G1.x,G1.y,G1.z,B1.x,B1.y,B1.z);
#endif
unsigned int Result_block_matching = params->activeBlockNumber;
unsigned int Result_block_matching_2 = 1;
// Fold the grid into 2D once the 65535 one-dimensional limit is hit
if (Result_block_matching > 65535) {
Result_block_matching_2 = (unsigned int)ceil((float)Result_block_matching/65535.0f);
Result_block_matching = 65535;
}
dim3 B2(NR_BLOCK->Block_result_block,1,1);
dim3 G2(Result_block_matching,Result_block_matching_2,1);
process_result_blocks_gpu<<<G2, B2>>>(*resultPosition_d, targetValues);
NR_CUDA_SAFE_CALL(cudaDeviceSynchronize());
#ifndef NDEBUG
printf("[NiftyReg CUDA DEBUG] process_result_blocks_gpu kernel: %s - Grid size [%i %i %i] - Block size [%i %i %i]\n",
cudaGetErrorString(cudaGetLastError()),G2.x,G2.y,G2.z,B2.x,B2.y,B2.z);
#endif
NR_CUDA_SAFE_CALL(cudaUnbindTexture(targetImageArray_texture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(resultImageArray_texture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(activeBlock_texture));
NR_CUDA_SAFE_CALL(cudaFree(targetValues));
NR_CUDA_SAFE_CALL(cudaFree(resultValues));
}
/** \brief Estimates the transformation from the matched block positions.
  * The optimisation itself is cheap, so the block positions are copied back
  * to the host and the existing CPU implementation is reused. */
void optimize_gpu( _reg_blockMatchingParam *blockMatchingParams,
                   mat44 *updateAffineMatrix,
                   float **targetPosition_d,
                   float **resultPosition_d,
                   bool affine)
{
    // Each active block contributes one 3D position.
    const int bytesToCopy = blockMatchingParams->activeBlockNumber * 3 * sizeof(float);
    NR_CUDA_SAFE_CALL(cudaMemcpy(blockMatchingParams->targetPosition, *targetPosition_d, bytesToCopy, cudaMemcpyDeviceToHost));
    NR_CUDA_SAFE_CALL(cudaMemcpy(blockMatchingParams->resultPosition, *resultPosition_d, bytesToCopy, cudaMemcpyDeviceToHost));
    // Delegate the actual optimisation to the CPU code path.
    optimize(blockMatchingParams, updateAffineMatrix, affine);
}
#endif
|
9a450f02b698a14dc4bf4f56ebc66c9ab91a4c99.hip | // !!! This is a file automatically generated by hipify!!!
#include <iomanip>
#include <iostream>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>
#include <fstream>
using namespace std;
// Integrand f(x) = exp(sin(x)) * cos(x / 40), callable on host and device.
// operator() is const so thrust can invoke it through a const copy of the
// functor (required by thrust's transform iterators).
struct integrate
{
__host__ __device__
double operator()(double x) const
{
return exp(sin(x))*cos(x/40);
}
};
// Host-side copy of the integrand, used for the boundary-correction terms.
double f(double x){
return cos(x/40)*exp(sin(x));
}
// Composite quadrature of f over [0, 100] (an 8th-order Newton-Cotes style
// rule with boundary weights 17/59/43/49 and interior weight 48), with the
// interior sum evaluated on the GPU via thrust. argv[1] = number of
// subintervals n. Writes |result - reference| and the elapsed time (ms) to
// "problem3.out".
// Fix: the reduction now uses thrust::plus<double> - the previous
// thrust::plus<float> truncated every partial sum to single precision, which
// dominated the quadrature error.
int main(int argc, char *argv[]){
double n = atoi(argv[1]);
double x0 = 0;
double xn = 100;
double h = (xn-x0)/n; // subinterval width
double sum;
double res;
double ref = 32.121040666358; // reference value of the integral
ofstream ofile;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
// Interior abscissas x = 4h .. (n-1)h.
// NOTE(review): hX has n entries but only n-4 are filled; the trailing
// zero-initialized entries each contribute f(0) to the interior sum -
// confirm this matches the intended quadrature formula.
thrust::host_vector<double> hX(n);
for(int i = 0; i < (n-4); i++){
hX[i] = (xn-x0)*(i+4)/n;
}
thrust::device_vector<double> dX = hX;
// Accumulate in double precision on the device.
thrust::plus<double> binary_op;
hipEventRecord(start,NULL); // restart timing after the host->device copy
double init = 0.0;
sum = thrust::transform_reduce(dX.begin(), dX.end(), integrate(), init, binary_op);
sum = sum*48; // interior weight
// Boundary corrections: weights 17, 59, 43, 49 at both ends.
sum += 17*f(x0) + 59*f((xn-x0)/n) + 43*f((xn-x0)*2/n) + 49*f((xn-x0)*3/n);
sum += 17*f((xn-x0)) + 59*f((xn-x0)*(n-1)/n) + 43*f((xn-x0)*(n-2)/n) + 49*f((xn-x0)*(n-3)/n);
res = sum*h/48;
hipEventRecord(stop,NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
ofile.open ("problem3.out");
ofile << setprecision(12) << abs(res-ref);
ofile << endl;
ofile << msecTotal;
ofile.close();
return 0;
}
| 9a450f02b698a14dc4bf4f56ebc66c9ab91a4c99.cu | #include <iomanip>
#include <iostream>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>
#include <fstream>
using namespace std;
// Integrand f(x) = exp(sin(x)) * cos(x / 40), callable on host and device.
// operator() is const so thrust can invoke it through a const copy of the
// functor (required by thrust's transform iterators).
struct integrate
{
__host__ __device__
double operator()(double x) const
{
return exp(sin(x))*cos(x/40);
}
};
// Host-side copy of the integrand, used for the boundary-correction terms.
double f(double x){
return cos(x/40)*exp(sin(x));
}
// Composite quadrature of f over [0, 100] (an 8th-order Newton-Cotes style
// rule with boundary weights 17/59/43/49 and interior weight 48), with the
// interior sum evaluated on the GPU via thrust. argv[1] = number of
// subintervals n. Writes |result - reference| and the elapsed time (ms) to
// "problem3.out".
// Fix: the reduction now uses thrust::plus<double> - the previous
// thrust::plus<float> truncated every partial sum to single precision, which
// dominated the quadrature error.
int main(int argc, char *argv[]){
double n = atoi(argv[1]);
double x0 = 0;
double xn = 100;
double h = (xn-x0)/n; // subinterval width
double sum;
double res;
double ref = 32.121040666358; // reference value of the integral
ofstream ofile;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
// Interior abscissas x = 4h .. (n-1)h.
// NOTE(review): hX has n entries but only n-4 are filled; the trailing
// zero-initialized entries each contribute f(0) to the interior sum -
// confirm this matches the intended quadrature formula.
thrust::host_vector<double> hX(n);
for(int i = 0; i < (n-4); i++){
hX[i] = (xn-x0)*(i+4)/n;
}
thrust::device_vector<double> dX = hX;
// Accumulate in double precision on the device.
thrust::plus<double> binary_op;
cudaEventRecord(start,NULL); // restart timing after the host->device copy
double init = 0.0;
sum = thrust::transform_reduce(dX.begin(), dX.end(), integrate(), init, binary_op);
sum = sum*48; // interior weight
// Boundary corrections: weights 17, 59, 43, 49 at both ends.
sum += 17*f(x0) + 59*f((xn-x0)/n) + 43*f((xn-x0)*2/n) + 49*f((xn-x0)*3/n);
sum += 17*f((xn-x0)) + 59*f((xn-x0)*(n-1)/n) + 43*f((xn-x0)*(n-2)/n) + 49*f((xn-x0)*(n-3)/n);
res = sum*h/48;
cudaEventRecord(stop,NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
ofile.open ("problem3.out");
ofile << setprecision(12) << abs(res-ref);
ofile << endl;
ofile << msecTotal;
ofile.close();
return 0;
}
|
6282311350c1a1226a0380d6de76d60f5a5269c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------
// Copyright (c) 2016, Andy Zeng
//
// This file is part of the 3DMatch Toolbox and is available
// under the terms of the Simplified BSD License provided in
// LICENSE. Please retain this notice and LICENSE if you use
// this file (or any portion of it) in your project.
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
#include "marvin.hpp"
#define CUDA_NUM_THREADS 512
#define CUDA_MAX_NUM_BLOCKS 2880
// CUDA kernel function to compute TDF voxel grid values given a point cloud (warning: approximate, but fast)
// CUDA kernel function to compute TDF voxel grid values given a point cloud (warning: approximate, but fast)
// One thread per voxel, with the flat voxel index split over grid loops
// (CUDA_LOOP_IDX), blocks and threads. For each empty voxel it searches a
// cube of radius trunc_margin around it for occupied voxels and stores
// 1 - (distance to the closest one) / search_radius, clamped to [0, 1].
// Fix: the bounds guard used '>' instead of '>=', letting the thread with
// voxel_idx == dim_x*dim_y*dim_z read and write one element past the grid.
__global__
void ComputeTDF(int CUDA_LOOP_IDX, float * voxel_grid_occ, float * voxel_grid_TDF,
                int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
                float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z,
                float voxel_size, float trunc_margin) {

  int voxel_idx = CUDA_LOOP_IDX * CUDA_NUM_THREADS * CUDA_MAX_NUM_BLOCKS + blockIdx.x * CUDA_NUM_THREADS + threadIdx.x;
  if (voxel_idx >= (voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z))
    return;

  // Recover the 3D voxel coordinates from the flat index (x fastest).
  int pt_grid_z = (int)floor((float)voxel_idx / ((float)voxel_grid_dim_x * (float)voxel_grid_dim_y));
  int pt_grid_y = (int)floor(((float)voxel_idx - ((float)pt_grid_z * (float)voxel_grid_dim_x * (float)voxel_grid_dim_y)) / (float)voxel_grid_dim_x);
  int pt_grid_x = (int)((float)voxel_idx - ((float)pt_grid_z * (float)voxel_grid_dim_x * (float)voxel_grid_dim_y) - ((float)pt_grid_y * (float)voxel_grid_dim_x));

  // Truncation margin expressed in voxels.
  int search_radius = (int)round(trunc_margin / voxel_size);

  if (voxel_grid_occ[voxel_idx] > 0) {
    voxel_grid_TDF[voxel_idx] = 1.0f; // on surface
    return;
  }

  // Find closest surface point within the (clamped) search cube.
  for (int iix = max(0, pt_grid_x - search_radius); iix < min(voxel_grid_dim_x, pt_grid_x + search_radius + 1); ++iix)
    for (int iiy = max(0, pt_grid_y - search_radius); iiy < min(voxel_grid_dim_y, pt_grid_y + search_radius + 1); ++iiy)
      for (int iiz = max(0, pt_grid_z - search_radius); iiz < min(voxel_grid_dim_z, pt_grid_z + search_radius + 1); ++iiz) {
        int iidx = iiz * voxel_grid_dim_x * voxel_grid_dim_y + iiy * voxel_grid_dim_x + iix;
        if (voxel_grid_occ[iidx] > 0) {
          float xd = (float)(pt_grid_x - iix);
          float yd = (float)(pt_grid_y - iiy);
          float zd = (float)(pt_grid_z - iiz);
          // Distance normalized by the search radius, so TDF falls off to 0
          // at trunc_margin; keep the best (largest) value seen so far.
          float dist = sqrtf(xd * xd + yd * yd + zd * zd) / (float)search_radius;
          if ((1.0f - dist) > voxel_grid_TDF[voxel_idx])
            voxel_grid_TDF[voxel_idx] = 1.0f - dist;
        }
      }
}
// Demo code to show how to generate keypoints and 3DMatch descriptors from a point cloud
// 1. Loads a point cloud file
// 2. Generates a TDF voxel volume for the point cloud
// 3. Finds random surface keypoints
// 4. Compute 3DMatch descriptor vectors for all keypoints using Marvin
int main(int argc, char * argv[]) {
std::string pointcloud_filename(argv[1]);
std::string out_prefix_filename(argv[2]);
// Super hacky code to read a point cloud file (replace this...)
std::ifstream pointcloud_file(pointcloud_filename.c_str());
if (!pointcloud_file) {
std::cerr << "Point cloud file not found." << std::endl;
return -1;
}
int num_pts = 0;
for (int line_idx = 0; line_idx < 7; ++line_idx) {
std::string line_str;
std::getline(pointcloud_file, line_str);
if (line_idx >= 2) {
if (num_pts == 0) {
std::istringstream tmp_line(line_str);
std::string tmp_line_prefix;
tmp_line >> tmp_line_prefix;
tmp_line >> tmp_line_prefix;
tmp_line >> num_pts;
}
}
}
if (num_pts == 0) {
std::cerr << "Line 3-7 of .ply file does not tell me number of points. Double check format of point cloud file (or change .ply file reader code)." << std::endl;
return 0;
}
float * pts = new float[num_pts * 3]; // Nx3 matrix saved as float array (row-major order)
// pointcloud_file.read((char*)pts, sizeof(float) * num_pts * 3);
std::string line;
int idx=0;
int ctx=0;
// Read one line at a time into the variable line:
while(std::getline(pointcloud_file, line))
{
std::vector<float> lineData;
std::stringstream lineStream(line);
float value;
// Read an integer at a time from the line
while(lineStream >> value)
{
// Add the integers from a line to a 1D array (vector)
//lineData.push_back(value);
pts[idx * 3 + ctx++ ] = value;
if(ctx>2){
idx++;ctx=0;
}
}
}
pointcloud_file.close();
std::cout << "Loaded point cloud with " << num_pts << " points!" << std::endl;
for (int pt_idx = 0; pt_idx < 10; ++pt_idx) {
std::cout << "[%d] [x,y,z]=[" << pt_idx << "] [" << pts[pt_idx * 3 + 0] << "," << pts[pt_idx * 3 + 1 ] <<
"," << pts[pt_idx * 3 + 2 ] << "]" << std::endl;
}
float voxel_size = 0.01;
float trunc_margin = voxel_size * 5;
int voxel_grid_padding = 15; // in voxels
// Compute point cloud coordinates of the origin voxel (0,0,0) of the voxel grid
float voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z;
float voxel_grid_max_x, voxel_grid_max_y, voxel_grid_max_z;
voxel_grid_origin_x = pts[0]; voxel_grid_max_x = pts[0];
voxel_grid_origin_y = pts[1]; voxel_grid_max_y = pts[1];
voxel_grid_origin_z = pts[2]; voxel_grid_max_z = pts[2];
for (int pt_idx = 0; pt_idx < num_pts; ++pt_idx) {
voxel_grid_origin_x = min(voxel_grid_origin_x, pts[pt_idx * 3 + 0]);
voxel_grid_origin_y = min(voxel_grid_origin_y, pts[pt_idx * 3 + 1]);
voxel_grid_origin_z = min(voxel_grid_origin_z, pts[pt_idx * 3 + 2]);
voxel_grid_max_x = max(voxel_grid_max_x, pts[pt_idx * 3 + 0]);
voxel_grid_max_y = max(voxel_grid_max_y, pts[pt_idx * 3 + 1]);
voxel_grid_max_z = max(voxel_grid_max_z, pts[pt_idx * 3 + 2]);
}
int voxel_grid_dim_x = round((voxel_grid_max_x - voxel_grid_origin_x) / voxel_size) + 1 + voxel_grid_padding * 2;
int voxel_grid_dim_y = round((voxel_grid_max_y - voxel_grid_origin_y) / voxel_size) + 1 + voxel_grid_padding * 2;
int voxel_grid_dim_z = round((voxel_grid_max_z - voxel_grid_origin_z) / voxel_size) + 1 + voxel_grid_padding * 2;
voxel_grid_origin_x = voxel_grid_origin_x - voxel_grid_padding * voxel_size + voxel_size / 2;
voxel_grid_origin_y = voxel_grid_origin_y - voxel_grid_padding * voxel_size + voxel_size / 2;
voxel_grid_origin_z = voxel_grid_origin_z - voxel_grid_padding * voxel_size + voxel_size / 2;
std::cout << "[x min,xmax]=[" << voxel_grid_origin_x << "," << voxel_grid_max_x << "]" << std::endl;
std::cout << "[y min,max]=[" << voxel_grid_origin_y << "," << voxel_grid_max_y << "]" << std::endl;
std::cout << "[z min,max]=[" << voxel_grid_origin_z << "," << voxel_grid_max_z << "]" << std::endl;
std::cout << "Size of TDF voxel grid: " << voxel_grid_dim_x << " x " << voxel_grid_dim_y << " x " << voxel_grid_dim_z << std::endl;
std::cout << "Computing TDF voxel grid..." << std::endl;
// Compute surface occupancy grid
float * voxel_grid_occ = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
memset(voxel_grid_occ, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
for (int pt_idx = 0; pt_idx < num_pts; ++pt_idx) {
int pt_grid_x = round((pts[pt_idx * 3 + 0] - voxel_grid_origin_x) / voxel_size);
int pt_grid_y = round((pts[pt_idx * 3 + 1] - voxel_grid_origin_y) / voxel_size);
int pt_grid_z = round((pts[pt_idx * 3 + 2] - voxel_grid_origin_z) / voxel_size);
voxel_grid_occ[pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x] = 1.0f;
}
// Initialize TDF voxel grid
float * voxel_grid_TDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
memset(voxel_grid_TDF, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
// Copy voxel grids to GPU memory
float * gpu_voxel_grid_occ;
float * gpu_voxel_grid_TDF;
hipMalloc(&gpu_voxel_grid_occ, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
hipMalloc(&gpu_voxel_grid_TDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
marvin::checkCUDA(__LINE__, hipGetLastError());
hipMemcpy(gpu_voxel_grid_occ, voxel_grid_occ, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_voxel_grid_TDF, voxel_grid_TDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
marvin::checkCUDA(__LINE__, hipGetLastError());
int CUDA_NUM_LOOPS = (int)ceil((float)(voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z) / (float)(CUDA_NUM_THREADS * CUDA_MAX_NUM_BLOCKS));
for (int CUDA_LOOP_IDX = 0; CUDA_LOOP_IDX < CUDA_NUM_LOOPS; ++CUDA_LOOP_IDX) {
hipLaunchKernelGGL(( ComputeTDF) , dim3(CUDA_MAX_NUM_BLOCKS), dim3(CUDA_NUM_THREADS) , 0, 0, CUDA_LOOP_IDX, gpu_voxel_grid_occ, gpu_voxel_grid_TDF,
voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_size, trunc_margin);
}
// Load TDF voxel grid from GPU to CPU memory
hipMemcpy(voxel_grid_TDF, gpu_voxel_grid_TDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
marvin::checkCUDA(__LINE__, hipGetLastError());
// Compute random surface keypoints in point cloud coordinates and voxel grid coordinates
std::cout << "Finding random surface keypoints..." << std::endl;
int num_keypts = 50 * 10;
float * keypts = new float[num_keypts * 3];
float * keypts_grid = new float[num_keypts * 3];
for (int keypt_idx = 0; keypt_idx < num_keypts; ++keypt_idx) {
int rand_idx = (int)(GetRandomFloat(0.0f, (float)num_pts));
keypts[keypt_idx * 3 + 0] = pts[rand_idx * 3 + 0];
keypts[keypt_idx * 3 + 1] = pts[rand_idx * 3 + 1];
keypts[keypt_idx * 3 + 2] = pts[rand_idx * 3 + 2];
keypts_grid[keypt_idx * 3 + 0] = round((pts[rand_idx * 3 + 0] - voxel_grid_origin_x) / voxel_size);
keypts_grid[keypt_idx * 3 + 1] = round((pts[rand_idx * 3 + 1] - voxel_grid_origin_y) / voxel_size);
keypts_grid[keypt_idx * 3 + 2] = round((pts[rand_idx * 3 + 2] - voxel_grid_origin_z) / voxel_size);
}
// Start Marvin network
marvin::Net convnet("3dmatch-net-test.json");
convnet.Malloc(marvin::Testing);
convnet.loadWeights("3dmatch-weights-snapshot-137000.marvin");
marvin::Response * rData;
marvin::Response * rFeat;
rData = convnet.getResponse("data");
rFeat = convnet.getResponse("feat");
std::cout << "3DMatch network architecture successfully loaded into Marvin!" << std::endl;
// Run forward passes with Marvin to get 3DMatch descriptors for each keypoint
int batch_size = 50;
int desc_size = 512;
StorageT * batch_TDF = new StorageT[batch_size * 30 * 30 * 30];
float * desc_3dmatch = new float[num_keypts * desc_size];
std::cout << "Computing 3DMatch descriptors for " << num_keypts << " keypoints..." << std::endl;
for (int batch_idx = 0; batch_idx < (num_keypts / batch_size); ++batch_idx) {
for (int keypt_idx = batch_idx * batch_size; keypt_idx < (batch_idx + 1) * batch_size; ++keypt_idx) {
int batch_keypt_idx = keypt_idx - batch_idx * batch_size;
float keypt_grid_x = keypts_grid[keypt_idx * 3 + 0];
float keypt_grid_y = keypts_grid[keypt_idx * 3 + 1];
float keypt_grid_z = keypts_grid[keypt_idx * 3 + 2];
// std::cout << keypt_idx << " " << batch_keypt_idx << std::endl;
// std::cout << " " << keypt_grid_x << " " << keypt_grid_y << " " << keypt_grid_z << std::endl;
// Get local TDF around keypoint
StorageT * local_voxel_grid_TDF = new StorageT[30 * 30 * 30];
int local_voxel_idx = 0;
for (int z = keypt_grid_z - 15; z < keypt_grid_z + 15; ++z)
for (int y = keypt_grid_y - 15; y < keypt_grid_y + 15; ++y)
for (int x = keypt_grid_x - 15; x < keypt_grid_x + 15; ++x) {
local_voxel_grid_TDF[local_voxel_idx] = CPUCompute2StorageT(voxel_grid_TDF[z * voxel_grid_dim_x * voxel_grid_dim_y + y * voxel_grid_dim_x + x]);
local_voxel_idx++;
}
for (int voxel_idx = 0; voxel_idx < 30 * 30 * 30; ++voxel_idx)
batch_TDF[batch_keypt_idx * 30 * 30 * 30 + voxel_idx] = local_voxel_grid_TDF[voxel_idx];
delete [] local_voxel_grid_TDF;
}
// Pass local TDF patches through Marvin
hipMemcpy(rData->dataGPU, batch_TDF, rData->numBytes(), hipMemcpyHostToDevice);
marvin::checkCUDA(__LINE__, hipGetLastError());
convnet.forward();
// Copy descriptor vectors from GPU to CPU memory
StorageT * desc_vecs = new StorageT[batch_size * desc_size];
hipMemcpy(desc_vecs, rFeat->dataGPU, rFeat->numBytes(), hipMemcpyDeviceToHost);
marvin::checkCUDA(__LINE__, hipGetLastError());
for (int desc_val_idx = 0; desc_val_idx < batch_size * desc_size; ++desc_val_idx)
desc_3dmatch[batch_idx * batch_size * desc_size + desc_val_idx] = CPUStorage2ComputeT(desc_vecs[desc_val_idx]);
delete [] desc_vecs;
}
// Save keypoints as binary file (Nx3 float array, row-major order)
std::cout << "Saving keypoints to disk (keypts.bin)..." << std::endl;
std::string keypts_saveto_path = out_prefix_filename + ".keypts.bin";
std::ofstream keypts_out_file(keypts_saveto_path, std::ios::binary | std::ios::out);
float num_keyptsf = (float) num_keypts;
keypts_out_file.write((char*)&num_keyptsf, sizeof(float));
for (int keypt_val_idx = 0; keypt_val_idx < num_keypts * 3; ++keypt_val_idx)
keypts_out_file.write((char*)&keypts[keypt_val_idx], sizeof(float));
keypts_out_file.close();
// Save 3DMatch descriptors as binary file (Nx512 float array, row-major order)
std::cout << "Saving 3DMatch descriptors to disk (desc.3dmatch.bin)..." << std::endl;
std::string desc_saveto_path = out_prefix_filename + ".desc.3dmatch.bin";
std::ofstream desc_out_file(desc_saveto_path, std::ios::binary | std::ios::out);
float desc_sizef = (float) desc_size;
desc_out_file.write((char*)&num_keyptsf, sizeof(float));
desc_out_file.write((char*)&desc_sizef, sizeof(float));
for (int desc_val_idx = 0; desc_val_idx < num_keypts * desc_size; ++desc_val_idx)
desc_out_file.write((char*)&desc_3dmatch[desc_val_idx], sizeof(float));
desc_out_file.close();
// // Save TDF voxel grid and its parameters to disk as binary file (float array)
// std::cout << "Saving TDF voxel grid values to disk (tdf.bin)..." << std::endl;
// std::string voxel_grid_saveto_path = "tdf.bin";
// std::ofstream tdf_out_file(voxel_grid_saveto_path, std::ios::binary | std::ios::out);
// float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
// float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
// float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
// tdf_out_file.write((char*)&voxel_grid_dim_xf, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_dim_yf, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_dim_zf, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_origin_x, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_origin_y, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_origin_z, sizeof(float));
// tdf_out_file.write((char*)&voxel_size, sizeof(float));
// tdf_out_file.write((char*)&trunc_margin, sizeof(float));
// for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
// tdf_out_file.write((char*)&voxel_grid_TDF[i], sizeof(float));
// tdf_out_file.close();
return 0;
}
| 6282311350c1a1226a0380d6de76d60f5a5269c8.cu | // ---------------------------------------------------------
// Copyright (c) 2016, Andy Zeng
//
// This file is part of the 3DMatch Toolbox and is available
// under the terms of the Simplified BSD License provided in
// LICENSE. Please retain this notice and LICENSE if you use
// this file (or any portion of it) in your project.
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
#include "marvin.hpp"
#define CUDA_NUM_THREADS 512
#define CUDA_MAX_NUM_BLOCKS 2880
// CUDA kernel function to compute TDF voxel grid values given a point cloud (warning: approximate, but fast)
__global__
void ComputeTDF(int CUDA_LOOP_IDX, float * voxel_grid_occ, float * voxel_grid_TDF,
int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z,
float voxel_size, float trunc_margin) {
int voxel_idx = CUDA_LOOP_IDX * CUDA_NUM_THREADS * CUDA_MAX_NUM_BLOCKS + blockIdx.x * CUDA_NUM_THREADS + threadIdx.x;
if (voxel_idx > (voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z))
return;
int pt_grid_z = (int)floor((float)voxel_idx / ((float)voxel_grid_dim_x * (float)voxel_grid_dim_y));
int pt_grid_y = (int)floor(((float)voxel_idx - ((float)pt_grid_z * (float)voxel_grid_dim_x * (float)voxel_grid_dim_y)) / (float)voxel_grid_dim_x);
int pt_grid_x = (int)((float)voxel_idx - ((float)pt_grid_z * (float)voxel_grid_dim_x * (float)voxel_grid_dim_y) - ((float)pt_grid_y * (float)voxel_grid_dim_x));
int search_radius = (int)round(trunc_margin / voxel_size);
if (voxel_grid_occ[voxel_idx] > 0) {
voxel_grid_TDF[voxel_idx] = 1.0f; // on surface
return;
}
// Find closest surface point
for (int iix = max(0, pt_grid_x - search_radius); iix < min(voxel_grid_dim_x, pt_grid_x + search_radius + 1); ++iix)
for (int iiy = max(0, pt_grid_y - search_radius); iiy < min(voxel_grid_dim_y, pt_grid_y + search_radius + 1); ++iiy)
for (int iiz = max(0, pt_grid_z - search_radius); iiz < min(voxel_grid_dim_z, pt_grid_z + search_radius + 1); ++iiz) {
int iidx = iiz * voxel_grid_dim_x * voxel_grid_dim_y + iiy * voxel_grid_dim_x + iix;
if (voxel_grid_occ[iidx] > 0) {
float xd = (float)(pt_grid_x - iix);
float yd = (float)(pt_grid_y - iiy);
float zd = (float)(pt_grid_z - iiz);
float dist = sqrtf(xd * xd + yd * yd + zd * zd) / (float)search_radius;
if ((1.0f - dist) > voxel_grid_TDF[voxel_idx])
voxel_grid_TDF[voxel_idx] = 1.0f - dist;
}
}
}
// Demo code to show how to generate keypoints and 3DMatch descriptors from a point cloud
// 1. Loads a point cloud file
// 2. Generates a TDF voxel volume for the point cloud
// 3. Finds random surface keypoints
// 4. Compute 3DMatch descriptor vectors for all keypoints using Marvin
int main(int argc, char * argv[]) {
std::string pointcloud_filename(argv[1]);
std::string out_prefix_filename(argv[2]);
// Super hacky code to read a point cloud file (replace this...)
std::ifstream pointcloud_file(pointcloud_filename.c_str());
if (!pointcloud_file) {
std::cerr << "Point cloud file not found." << std::endl;
return -1;
}
int num_pts = 0;
for (int line_idx = 0; line_idx < 7; ++line_idx) {
std::string line_str;
std::getline(pointcloud_file, line_str);
if (line_idx >= 2) {
if (num_pts == 0) {
std::istringstream tmp_line(line_str);
std::string tmp_line_prefix;
tmp_line >> tmp_line_prefix;
tmp_line >> tmp_line_prefix;
tmp_line >> num_pts;
}
}
}
if (num_pts == 0) {
std::cerr << "Line 3-7 of .ply file does not tell me number of points. Double check format of point cloud file (or change .ply file reader code)." << std::endl;
return 0;
}
float * pts = new float[num_pts * 3]; // Nx3 matrix saved as float array (row-major order)
// pointcloud_file.read((char*)pts, sizeof(float) * num_pts * 3);
std::string line;
int idx=0;
int ctx=0;
// Read one line at a time into the variable line:
while(std::getline(pointcloud_file, line))
{
std::vector<float> lineData;
std::stringstream lineStream(line);
float value;
// Read an integer at a time from the line
while(lineStream >> value)
{
// Add the integers from a line to a 1D array (vector)
//lineData.push_back(value);
pts[idx * 3 + ctx++ ] = value;
if(ctx>2){
idx++;ctx=0;
}
}
}
pointcloud_file.close();
std::cout << "Loaded point cloud with " << num_pts << " points!" << std::endl;
for (int pt_idx = 0; pt_idx < 10; ++pt_idx) {
std::cout << "[%d] [x,y,z]=[" << pt_idx << "] [" << pts[pt_idx * 3 + 0] << "," << pts[pt_idx * 3 + 1 ] <<
"," << pts[pt_idx * 3 + 2 ] << "]" << std::endl;
}
float voxel_size = 0.01;
float trunc_margin = voxel_size * 5;
int voxel_grid_padding = 15; // in voxels
// Compute point cloud coordinates of the origin voxel (0,0,0) of the voxel grid
float voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z;
float voxel_grid_max_x, voxel_grid_max_y, voxel_grid_max_z;
voxel_grid_origin_x = pts[0]; voxel_grid_max_x = pts[0];
voxel_grid_origin_y = pts[1]; voxel_grid_max_y = pts[1];
voxel_grid_origin_z = pts[2]; voxel_grid_max_z = pts[2];
for (int pt_idx = 0; pt_idx < num_pts; ++pt_idx) {
voxel_grid_origin_x = min(voxel_grid_origin_x, pts[pt_idx * 3 + 0]);
voxel_grid_origin_y = min(voxel_grid_origin_y, pts[pt_idx * 3 + 1]);
voxel_grid_origin_z = min(voxel_grid_origin_z, pts[pt_idx * 3 + 2]);
voxel_grid_max_x = max(voxel_grid_max_x, pts[pt_idx * 3 + 0]);
voxel_grid_max_y = max(voxel_grid_max_y, pts[pt_idx * 3 + 1]);
voxel_grid_max_z = max(voxel_grid_max_z, pts[pt_idx * 3 + 2]);
}
int voxel_grid_dim_x = round((voxel_grid_max_x - voxel_grid_origin_x) / voxel_size) + 1 + voxel_grid_padding * 2;
int voxel_grid_dim_y = round((voxel_grid_max_y - voxel_grid_origin_y) / voxel_size) + 1 + voxel_grid_padding * 2;
int voxel_grid_dim_z = round((voxel_grid_max_z - voxel_grid_origin_z) / voxel_size) + 1 + voxel_grid_padding * 2;
voxel_grid_origin_x = voxel_grid_origin_x - voxel_grid_padding * voxel_size + voxel_size / 2;
voxel_grid_origin_y = voxel_grid_origin_y - voxel_grid_padding * voxel_size + voxel_size / 2;
voxel_grid_origin_z = voxel_grid_origin_z - voxel_grid_padding * voxel_size + voxel_size / 2;
std::cout << "[x min,xmax]=[" << voxel_grid_origin_x << "," << voxel_grid_max_x << "]" << std::endl;
std::cout << "[y min,max]=[" << voxel_grid_origin_y << "," << voxel_grid_max_y << "]" << std::endl;
std::cout << "[z min,max]=[" << voxel_grid_origin_z << "," << voxel_grid_max_z << "]" << std::endl;
std::cout << "Size of TDF voxel grid: " << voxel_grid_dim_x << " x " << voxel_grid_dim_y << " x " << voxel_grid_dim_z << std::endl;
std::cout << "Computing TDF voxel grid..." << std::endl;
// Compute surface occupancy grid
float * voxel_grid_occ = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
memset(voxel_grid_occ, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
for (int pt_idx = 0; pt_idx < num_pts; ++pt_idx) {
int pt_grid_x = round((pts[pt_idx * 3 + 0] - voxel_grid_origin_x) / voxel_size);
int pt_grid_y = round((pts[pt_idx * 3 + 1] - voxel_grid_origin_y) / voxel_size);
int pt_grid_z = round((pts[pt_idx * 3 + 2] - voxel_grid_origin_z) / voxel_size);
voxel_grid_occ[pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x] = 1.0f;
}
// Initialize TDF voxel grid
float * voxel_grid_TDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
memset(voxel_grid_TDF, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
// Copy voxel grids to GPU memory
float * gpu_voxel_grid_occ;
float * gpu_voxel_grid_TDF;
cudaMalloc(&gpu_voxel_grid_occ, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
cudaMalloc(&gpu_voxel_grid_TDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
marvin::checkCUDA(__LINE__, cudaGetLastError());
cudaMemcpy(gpu_voxel_grid_occ, voxel_grid_occ, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_voxel_grid_TDF, voxel_grid_TDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
marvin::checkCUDA(__LINE__, cudaGetLastError());
int CUDA_NUM_LOOPS = (int)ceil((float)(voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z) / (float)(CUDA_NUM_THREADS * CUDA_MAX_NUM_BLOCKS));
for (int CUDA_LOOP_IDX = 0; CUDA_LOOP_IDX < CUDA_NUM_LOOPS; ++CUDA_LOOP_IDX) {
ComputeTDF <<< CUDA_MAX_NUM_BLOCKS, CUDA_NUM_THREADS >>>(CUDA_LOOP_IDX, gpu_voxel_grid_occ, gpu_voxel_grid_TDF,
voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_size, trunc_margin);
}
// Load TDF voxel grid from GPU to CPU memory
cudaMemcpy(voxel_grid_TDF, gpu_voxel_grid_TDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
marvin::checkCUDA(__LINE__, cudaGetLastError());
// Compute random surface keypoints in point cloud coordinates and voxel grid coordinates
std::cout << "Finding random surface keypoints..." << std::endl;
int num_keypts = 50 * 10;
float * keypts = new float[num_keypts * 3];
float * keypts_grid = new float[num_keypts * 3];
for (int keypt_idx = 0; keypt_idx < num_keypts; ++keypt_idx) {
int rand_idx = (int)(GetRandomFloat(0.0f, (float)num_pts));
keypts[keypt_idx * 3 + 0] = pts[rand_idx * 3 + 0];
keypts[keypt_idx * 3 + 1] = pts[rand_idx * 3 + 1];
keypts[keypt_idx * 3 + 2] = pts[rand_idx * 3 + 2];
keypts_grid[keypt_idx * 3 + 0] = round((pts[rand_idx * 3 + 0] - voxel_grid_origin_x) / voxel_size);
keypts_grid[keypt_idx * 3 + 1] = round((pts[rand_idx * 3 + 1] - voxel_grid_origin_y) / voxel_size);
keypts_grid[keypt_idx * 3 + 2] = round((pts[rand_idx * 3 + 2] - voxel_grid_origin_z) / voxel_size);
}
// Start Marvin network
marvin::Net convnet("3dmatch-net-test.json");
convnet.Malloc(marvin::Testing);
convnet.loadWeights("3dmatch-weights-snapshot-137000.marvin");
marvin::Response * rData;
marvin::Response * rFeat;
rData = convnet.getResponse("data");
rFeat = convnet.getResponse("feat");
std::cout << "3DMatch network architecture successfully loaded into Marvin!" << std::endl;
// Run forward passes with Marvin to get 3DMatch descriptors for each keypoint
int batch_size = 50;
int desc_size = 512;
StorageT * batch_TDF = new StorageT[batch_size * 30 * 30 * 30];
float * desc_3dmatch = new float[num_keypts * desc_size];
std::cout << "Computing 3DMatch descriptors for " << num_keypts << " keypoints..." << std::endl;
for (int batch_idx = 0; batch_idx < (num_keypts / batch_size); ++batch_idx) {
for (int keypt_idx = batch_idx * batch_size; keypt_idx < (batch_idx + 1) * batch_size; ++keypt_idx) {
int batch_keypt_idx = keypt_idx - batch_idx * batch_size;
float keypt_grid_x = keypts_grid[keypt_idx * 3 + 0];
float keypt_grid_y = keypts_grid[keypt_idx * 3 + 1];
float keypt_grid_z = keypts_grid[keypt_idx * 3 + 2];
// std::cout << keypt_idx << " " << batch_keypt_idx << std::endl;
// std::cout << " " << keypt_grid_x << " " << keypt_grid_y << " " << keypt_grid_z << std::endl;
// Get local TDF around keypoint
StorageT * local_voxel_grid_TDF = new StorageT[30 * 30 * 30];
int local_voxel_idx = 0;
for (int z = keypt_grid_z - 15; z < keypt_grid_z + 15; ++z)
for (int y = keypt_grid_y - 15; y < keypt_grid_y + 15; ++y)
for (int x = keypt_grid_x - 15; x < keypt_grid_x + 15; ++x) {
local_voxel_grid_TDF[local_voxel_idx] = CPUCompute2StorageT(voxel_grid_TDF[z * voxel_grid_dim_x * voxel_grid_dim_y + y * voxel_grid_dim_x + x]);
local_voxel_idx++;
}
for (int voxel_idx = 0; voxel_idx < 30 * 30 * 30; ++voxel_idx)
batch_TDF[batch_keypt_idx * 30 * 30 * 30 + voxel_idx] = local_voxel_grid_TDF[voxel_idx];
delete [] local_voxel_grid_TDF;
}
// Pass local TDF patches through Marvin
cudaMemcpy(rData->dataGPU, batch_TDF, rData->numBytes(), cudaMemcpyHostToDevice);
marvin::checkCUDA(__LINE__, cudaGetLastError());
convnet.forward();
// Copy descriptor vectors from GPU to CPU memory
StorageT * desc_vecs = new StorageT[batch_size * desc_size];
cudaMemcpy(desc_vecs, rFeat->dataGPU, rFeat->numBytes(), cudaMemcpyDeviceToHost);
marvin::checkCUDA(__LINE__, cudaGetLastError());
for (int desc_val_idx = 0; desc_val_idx < batch_size * desc_size; ++desc_val_idx)
desc_3dmatch[batch_idx * batch_size * desc_size + desc_val_idx] = CPUStorage2ComputeT(desc_vecs[desc_val_idx]);
delete [] desc_vecs;
}
// Save keypoints as binary file (Nx3 float array, row-major order)
std::cout << "Saving keypoints to disk (keypts.bin)..." << std::endl;
std::string keypts_saveto_path = out_prefix_filename + ".keypts.bin";
std::ofstream keypts_out_file(keypts_saveto_path, std::ios::binary | std::ios::out);
float num_keyptsf = (float) num_keypts;
keypts_out_file.write((char*)&num_keyptsf, sizeof(float));
for (int keypt_val_idx = 0; keypt_val_idx < num_keypts * 3; ++keypt_val_idx)
keypts_out_file.write((char*)&keypts[keypt_val_idx], sizeof(float));
keypts_out_file.close();
// Save 3DMatch descriptors as binary file (Nx512 float array, row-major order)
std::cout << "Saving 3DMatch descriptors to disk (desc.3dmatch.bin)..." << std::endl;
std::string desc_saveto_path = out_prefix_filename + ".desc.3dmatch.bin";
std::ofstream desc_out_file(desc_saveto_path, std::ios::binary | std::ios::out);
float desc_sizef = (float) desc_size;
desc_out_file.write((char*)&num_keyptsf, sizeof(float));
desc_out_file.write((char*)&desc_sizef, sizeof(float));
for (int desc_val_idx = 0; desc_val_idx < num_keypts * desc_size; ++desc_val_idx)
desc_out_file.write((char*)&desc_3dmatch[desc_val_idx], sizeof(float));
desc_out_file.close();
// // Save TDF voxel grid and its parameters to disk as binary file (float array)
// std::cout << "Saving TDF voxel grid values to disk (tdf.bin)..." << std::endl;
// std::string voxel_grid_saveto_path = "tdf.bin";
// std::ofstream tdf_out_file(voxel_grid_saveto_path, std::ios::binary | std::ios::out);
// float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
// float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
// float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
// tdf_out_file.write((char*)&voxel_grid_dim_xf, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_dim_yf, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_dim_zf, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_origin_x, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_origin_y, sizeof(float));
// tdf_out_file.write((char*)&voxel_grid_origin_z, sizeof(float));
// tdf_out_file.write((char*)&voxel_size, sizeof(float));
// tdf_out_file.write((char*)&trunc_margin, sizeof(float));
// for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
// tdf_out_file.write((char*)&voxel_grid_TDF[i], sizeof(float));
// tdf_out_file.close();
return 0;
}
|
3e87af2734405208959e844373ff2060819fccb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
if (comp > +1.4454E36f - coshf(var_1 - var_2)) {
float tmp_1 = -0.0f;
float tmp_2 = -1.2796E36f;
comp = tmp_2 * tmp_1 * var_3 + (+1.6015E-41f + var_4 / +1.2304E23f / powf(atanf(+1.7123E4f), (var_5 * (var_6 * +1.0468E-44f + -1.6785E35f))));
comp = sinf(-0.0f - ceilf((-1.3507E-36f + var_7 - var_8)));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
hipDeviceSynchronize();
return 0;
}
| 3e87af2734405208959e844373ff2060819fccb5.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
if (comp > +1.4454E36f - coshf(var_1 - var_2)) {
float tmp_1 = -0.0f;
float tmp_2 = -1.2796E36f;
comp = tmp_2 * tmp_1 * var_3 + (+1.6015E-41f + var_4 / +1.2304E23f / powf(atanf(+1.7123E4f), (var_5 * (var_6 * +1.0468E-44f + -1.6785E35f))));
comp = sinf(-0.0f - ceilf((-1.3507E-36f + var_7 - var_8)));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
cudaDeviceSynchronize();
return 0;
}
|
0fb610b28dbbee7875596ecac1e1ce6092eb8e86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
to compile this code
>> nvcc simple_cuda_test.cu -o cuda_test
to profile execution speed
>> nvprof cuda_test.exe
runs in 456.87 us on my laptop with GeForce GT 745M
*/
#include <iostream>
#include <math.h>
// this function implements grid-sride loop
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
int main(void)
{
int N = 1<<20;
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// 4069x256
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
{
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
hipFree(x);
hipFree(y);
return 0;
}
| 0fb610b28dbbee7875596ecac1e1ce6092eb8e86.cu | /*
to compile this code
>> nvcc simple_cuda_test.cu -o cuda_test
to profile execution speed
>> nvprof cuda_test.exe
runs in 456.87 us on my laptop with GeForce GT 745M
*/
#include <iostream>
#include <math.h>
// this function implements grid-sride loop
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
int main(void)
{
int N = 1<<20;
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// 4069x256
add<<<numBlocks, blockSize>>>(N, x, y);
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
{
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
cudaFree(x);
cudaFree(y);
return 0;
}
|
be914515a2ee431b55b414b7009a7eafd12dfd68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Significant portions shamelessly stolen from Kandrot & Sanders CUDA by example
#include <stdlib.h> // for rand
#include <string.h> // for memcpy
#include <stdio.h> // for printf
#include <time.h> // for nanosleep
#include "common.h"
#include "nv/gpu_anim.h"
#include "nv/cpu_anim.h"
#ifdef _WIN32
#include <chrono>
#include <thread>
#endif
struct GPUDataBlock {
int HEIGHT;
int WIDTH;
int block_width;
};
struct Args_t globalArgs;
__global__ void compute_ripple_bitmap(uchar4* bitmap, int ticks, int WIDTH, int HEIGHT)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= WIDTH || y >= HEIGHT) {
return;
}
int offset = x + y * blockDim.x * gridDim.x;
float fx = x - WIDTH/2;
float fy = y - HEIGHT/2;
float d = sqrtf(fx * fx + fy * fy);
unsigned char grey = (unsigned char) (128.0f + 127.0f * cos(d/10.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
bitmap[offset].x = grey;
bitmap[offset].y = grey;
bitmap[offset].z = grey;
bitmap[offset].w = 255;
}
void generate_frame(uchar4 * bitmap, GPUDataBlock * d, int ticks) {
static int count = 0;
dim3 grids(ceil((float)d->WIDTH/d->block_width), ceil((float)d->HEIGHT/d->block_width));
dim3 threads(d->block_width, d->block_width);
count++;
hipLaunchKernelGGL(( compute_ripple_bitmap), dim3(grids), dim3(threads), 0, 0, bitmap, ticks, d->WIDTH, d->HEIGHT);
timeout(&globalArgs, count);
}
struct CPUDataBlock {
GPUDataBlock gpu;
uchar4 *dev_bitmap;
CPUAnimBitmap *bitmap;
};
void generate_frame_cpu(CPUDataBlock * d, int ticks) {
generate_frame(d->dev_bitmap, &d->gpu, ticks);
gpuErrchk(hipMemcpy(d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), hipMemcpyDeviceToHost));
}
void cleanup_gpu(GPUDataBlock *d)
{
}
void cleanup_cpu(CPUDataBlock *d) {
cleanup_gpu(&d->gpu);
hipFree(d->dev_bitmap);
}
void init_gpu(GPUDataBlock *d)
{
d->HEIGHT = globalArgs.height;
d->WIDTH = globalArgs.width;
d->block_width = globalArgs.blockwidth;
}
int main(int argc, char **argv) {
processArgs("ripple", argv, argc, &globalArgs);
switch(globalArgs.mode) {
case PROFILE_NONE:
printf("Set a profile mode. \"None\" is unimplemented.\n");
break;
case PROFILE_GPU:
{
GPUDataBlock data;
init_gpu(&data);
GPUAnimBitmap bitmap(data.WIDTH, data.HEIGHT, &data);
bitmap.anim_and_exit((void (*)(uchar4*,void*,int))generate_frame, NULL);
}
break;
case PROFILE_CPU:
{
CPUDataBlock data;
init_gpu(&data.gpu);
CPUAnimBitmap bitmap(data.gpu.WIDTH, data.gpu.HEIGHT, &data);
data.bitmap = &bitmap;
gpuErrchk(hipMalloc((void**)&data.dev_bitmap, data.bitmap->image_size()));
bitmap.anim_and_exit((void (*)(void*,int))generate_frame_cpu, (void(*)(void*))cleanup_cpu);
}
break;
default:
printf("Unhandled mode by ripple.\n");
exit(1);
}
}
| be914515a2ee431b55b414b7009a7eafd12dfd68.cu | // Significant portions shamelessly stolen from Kandrot & Sanders CUDA by example
#include <stdlib.h> // for rand
#include <string.h> // for memcpy
#include <stdio.h> // for printf
#include <time.h> // for nanosleep
#include "common.h"
#include "nv/gpu_anim.h"
#include "nv/cpu_anim.h"
#ifdef _WIN32
#include <chrono>
#include <thread>
#endif
struct GPUDataBlock {
int HEIGHT;
int WIDTH;
int block_width;
};
struct Args_t globalArgs;
__global__ void compute_ripple_bitmap(uchar4* bitmap, int ticks, int WIDTH, int HEIGHT)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= WIDTH || y >= HEIGHT) {
return;
}
int offset = x + y * blockDim.x * gridDim.x;
float fx = x - WIDTH/2;
float fy = y - HEIGHT/2;
float d = sqrtf(fx * fx + fy * fy);
unsigned char grey = (unsigned char) (128.0f + 127.0f * cos(d/10.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
bitmap[offset].x = grey;
bitmap[offset].y = grey;
bitmap[offset].z = grey;
bitmap[offset].w = 255;
}
void generate_frame(uchar4 * bitmap, GPUDataBlock * d, int ticks) {
static int count = 0;
dim3 grids(ceil((float)d->WIDTH/d->block_width), ceil((float)d->HEIGHT/d->block_width));
dim3 threads(d->block_width, d->block_width);
count++;
compute_ripple_bitmap<<<grids, threads>>>(bitmap, ticks, d->WIDTH, d->HEIGHT);
timeout(&globalArgs, count);
}
struct CPUDataBlock {
GPUDataBlock gpu;
uchar4 *dev_bitmap;
CPUAnimBitmap *bitmap;
};
void generate_frame_cpu(CPUDataBlock * d, int ticks) {
generate_frame(d->dev_bitmap, &d->gpu, ticks);
gpuErrchk(cudaMemcpy(d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), cudaMemcpyDeviceToHost));
}
void cleanup_gpu(GPUDataBlock *d)
{
}
void cleanup_cpu(CPUDataBlock *d) {
cleanup_gpu(&d->gpu);
cudaFree(d->dev_bitmap);
}
void init_gpu(GPUDataBlock *d)
{
d->HEIGHT = globalArgs.height;
d->WIDTH = globalArgs.width;
d->block_width = globalArgs.blockwidth;
}
int main(int argc, char **argv) {
processArgs("ripple", argv, argc, &globalArgs);
switch(globalArgs.mode) {
case PROFILE_NONE:
printf("Set a profile mode. \"None\" is unimplemented.\n");
break;
case PROFILE_GPU:
{
GPUDataBlock data;
init_gpu(&data);
GPUAnimBitmap bitmap(data.WIDTH, data.HEIGHT, &data);
bitmap.anim_and_exit((void (*)(uchar4*,void*,int))generate_frame, NULL);
}
break;
case PROFILE_CPU:
{
CPUDataBlock data;
init_gpu(&data.gpu);
CPUAnimBitmap bitmap(data.gpu.WIDTH, data.gpu.HEIGHT, &data);
data.bitmap = &bitmap;
gpuErrchk(cudaMalloc((void**)&data.dev_bitmap, data.bitmap->image_size()));
bitmap.anim_and_exit((void (*)(void*,int))generate_frame_cpu, (void(*)(void*))cleanup_cpu);
}
break;
default:
printf("Unhandled mode by ripple.\n");
exit(1);
}
}
|
37c5d2969d41764da52b2f0cebac8f1d2d469c27.hip | // !!! This is a file automatically generated by hipify!!!
// This example demonstrates the use of shared per-block arrays
// implement an optimized dense matrix multiplication algorithm.
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <algorithm>
#include <iostream>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "deviceQuery.h"
#define BLOCK_SIZE 8
void matrix_transpose_seq(float *a, float *b, size_t width){
for (int i =0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
b[i*width+j] = a[j*width+i];
}
}
}
__global__ void matrix_transpose_simple(const float *a, float *b, size_t width){
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
b[row*width+col] = a[col*width+row];
}
__global__ void matrix_transpose_shared (const float *a, float *b, size_t width) {
__shared__ float s[BLOCK_SIZE][BLOCK_SIZE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = bx*BLOCK_SIZE + tx;
int col = by*BLOCK_SIZE + ty;
int in = row + col * width;
int rowOut = by*BLOCK_SIZE + tx;
int colOut = bx*BLOCK_SIZE + ty;
int out = rowOut + colOut * width;
s[ty][tx] = a[in];
__syncthreads();
b[out] = s[tx][ty];
}
// compare two matrix to see if they are equal -- for verification
int matrixEqual( float *matrixA, float *matrixB, int m, int n ){
int bad = 0;
for ( int y = 0; y < m && !bad ; y++ )
for ( int x = 0; x < n && !bad ; x++ ){
if ( abs(matrixA[y*n+x] - matrixB[y*n+x]) > 1e-4 ){
bad++;
}
}
return !bad;
}
int main(void){
QueryDevice();
std::cout << std::endl;
for (int o = 0; o < 4; ++o) {
const size_t n = 1<<(9+o);
dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
dim3 num_blocks(n / block_size.x, n / block_size.y);
std::cout << "Matrix size: 2^" << (9+o) << "x" << "2^" << (9+o) << std::endl;
std::cout << "Block size: " << block_size.x << "x" << block_size.y << std::endl;
float *h_a, *h_s, *h_res;
h_a = (float *)malloc(sizeof(float) * n * n);
h_s = (float *)malloc(sizeof(float) * n * n);
h_res = (float*)malloc(sizeof(float) * n * n);
for(int i = 0; i < n*n; ++i){
h_a[i] = static_cast<float>(rand()) / RAND_MAX;
}
float *d_a = 0, *d_c = 0;
hipMalloc((void**)&d_a, sizeof(float) * n * n);
hipMalloc((void**)&d_c, sizeof(float) * n * n);
// copy input to the device
hipMemcpy(d_a, h_a, sizeof(float) * n * n, hipMemcpyHostToDevice);
// time the kernel launches using CUDA events
hipEvent_t launch_begin, launch_end;
hipEventCreate(&launch_begin);
hipEventCreate(&launch_end);
//time many sequential run and take the average
size_t num_launches = 10;
double average_seq_time;
struct timespec start, end;
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
for(int i = 0; i < num_launches; i++){
matrix_transpose_seq(h_a, h_s, n);
}
if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
//compute the time in s
average_seq_time = (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1e+9;
//take the average
average_seq_time /= num_launches;
std::cout << "Average sequential time: " << average_seq_time << "s" << std::endl;
//launch a single "warm-up" kernel
hipLaunchKernelGGL(( matrix_transpose_simple), dim3(num_blocks),dim3(block_size), 0, 0, d_a, d_c, n);
hipMemcpy(h_res, d_c, sizeof(float)*n*n, hipMemcpyDeviceToHost);
int equal = matrixEqual(h_res, h_s, n, n);
if(!equal) {
return 0;
}
// time many kernel launches and take the average time
float average_simple_time = 0;
for(int i = 0; i < num_launches; ++i){
// record a CUDA event immediately before and after the kernel launch
hipEventRecord(launch_begin,0);
hipLaunchKernelGGL(( matrix_transpose_simple), dim3(num_blocks),dim3(block_size), 0, 0, d_a, d_c, n);
hipEventRecord(launch_end,0);
hipEventSynchronize(launch_end);
// measure the time spent in the kernel
float time = 0;
hipEventElapsedTime(&time, launch_begin, launch_end);
average_simple_time += time;
}
average_simple_time /= num_launches;
std::cout << "Average global kernel time: " << average_simple_time << "ms" << std::endl;
hipLaunchKernelGGL(( matrix_transpose_shared), dim3(num_blocks),dim3(block_size), 0, 0, d_a, d_c, n);
hipMemcpy(h_res, d_c, sizeof(float)*n*n, hipMemcpyDeviceToHost);
equal = matrixEqual(h_res, h_s, n, n);
if(!equal){
return 0;
}
float average_tiled_time = 0;
for(int i = 0; i < num_launches; ++i){
hipEventRecord(launch_begin,0);
hipLaunchKernelGGL(( matrix_transpose_shared), dim3(num_blocks),dim3(block_size), 0, 0, d_a, d_c, n);
hipEventRecord(launch_end,0);
hipEventSynchronize(launch_end);
float time = 0;
hipEventElapsedTime(&time, launch_begin, launch_end);
average_tiled_time += time;
}
average_tiled_time /= num_launches;
std::cout << "Average shared kernel time: " << average_tiled_time << "ms" << std::endl;
float mem_size = sizeof(float) * n * n;
float num_ops = 2 * mem_size;
float seq_throughput = num_ops / average_seq_time / 1000000000.0f;
float simple_throughput = num_ops / (average_simple_time / 1000.0f) / 1000000000.0f;
float tiled_throughput = num_ops / (average_tiled_time / 1000.0f) / 1000000000.0f;
std::cout << "Throughput of sequential implementation: " << seq_throughput << "GB/s" << std::endl;
std::cout << "Throughput of global kernel: " << simple_throughput << "GB/s" << std::endl;
std::cout << "Throughput of shared kernel: " << tiled_throughput << "GB/s" << std::endl;
std::cout << "Performance speedup: global over sequential " << simple_throughput / seq_throughput << "x" << std::endl;
std::cout << "Performance speedup: shared over sequential " << tiled_throughput / seq_throughput << "x" << std::endl;
std::cout << "Performance speedup: shared over global " << tiled_throughput / simple_throughput << "x" << std::endl;
std::cout << "" << std::endl;
// destroy the CUDA events
hipEventDestroy(launch_begin);
hipEventDestroy(launch_end);
// deallocate device memory
hipFree(d_a);
hipFree(d_c);
free(h_a);
free(h_s);
free(h_res);
}
return 0;
}
| 37c5d2969d41764da52b2f0cebac8f1d2d469c27.cu | // This example demonstrates the use of shared per-block arrays
// implement an optimized dense matrix multiplication algorithm.
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <algorithm>
#include <iostream>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include "deviceQuery.h"
#define BLOCK_SIZE 8
void matrix_transpose_seq(float *a, float *b, size_t width){
for (int i =0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
b[i*width+j] = a[j*width+i];
}
}
}
__global__ void matrix_transpose_simple(const float *a, float *b, size_t width){
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
b[row*width+col] = a[col*width+row];
}
__global__ void matrix_transpose_shared (const float *a, float *b, size_t width) {
__shared__ float s[BLOCK_SIZE][BLOCK_SIZE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = bx*BLOCK_SIZE + tx;
int col = by*BLOCK_SIZE + ty;
int in = row + col * width;
int rowOut = by*BLOCK_SIZE + tx;
int colOut = bx*BLOCK_SIZE + ty;
int out = rowOut + colOut * width;
s[ty][tx] = a[in];
__syncthreads();
b[out] = s[tx][ty];
}
// compare two matrix to see if they are equal -- for verification
int matrixEqual( float *matrixA, float *matrixB, int m, int n ){
int bad = 0;
for ( int y = 0; y < m && !bad ; y++ )
for ( int x = 0; x < n && !bad ; x++ ){
if ( abs(matrixA[y*n+x] - matrixB[y*n+x]) > 1e-4 ){
bad++;
}
}
return !bad;
}
int main(void){
QueryDevice();
std::cout << std::endl;
for (int o = 0; o < 4; ++o) {
const size_t n = 1<<(9+o);
dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
dim3 num_blocks(n / block_size.x, n / block_size.y);
std::cout << "Matrix size: 2^" << (9+o) << "x" << "2^" << (9+o) << std::endl;
std::cout << "Block size: " << block_size.x << "x" << block_size.y << std::endl;
float *h_a, *h_s, *h_res;
h_a = (float *)malloc(sizeof(float) * n * n);
h_s = (float *)malloc(sizeof(float) * n * n);
h_res = (float*)malloc(sizeof(float) * n * n);
for(int i = 0; i < n*n; ++i){
h_a[i] = static_cast<float>(rand()) / RAND_MAX;
}
float *d_a = 0, *d_c = 0;
cudaMalloc((void**)&d_a, sizeof(float) * n * n);
cudaMalloc((void**)&d_c, sizeof(float) * n * n);
// copy input to the device
cudaMemcpy(d_a, h_a, sizeof(float) * n * n, cudaMemcpyHostToDevice);
// time the kernel launches using CUDA events
cudaEvent_t launch_begin, launch_end;
cudaEventCreate(&launch_begin);
cudaEventCreate(&launch_end);
//time many sequential run and take the average
size_t num_launches = 10;
double average_seq_time;
struct timespec start, end;
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
for(int i = 0; i < num_launches; i++){
matrix_transpose_seq(h_a, h_s, n);
}
if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
//compute the time in s
average_seq_time = (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1e+9;
//take the average
average_seq_time /= num_launches;
std::cout << "Average sequential time: " << average_seq_time << "s" << std::endl;
//launch a single "warm-up" kernel
matrix_transpose_simple<<<num_blocks,block_size>>>(d_a, d_c, n);
cudaMemcpy(h_res, d_c, sizeof(float)*n*n, cudaMemcpyDeviceToHost);
int equal = matrixEqual(h_res, h_s, n, n);
if(!equal) {
return 0;
}
// time many kernel launches and take the average time
float average_simple_time = 0;
for(int i = 0; i < num_launches; ++i){
// record a CUDA event immediately before and after the kernel launch
cudaEventRecord(launch_begin,0);
matrix_transpose_simple<<<num_blocks,block_size>>>(d_a, d_c, n);
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
// measure the time spent in the kernel
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_simple_time += time;
}
average_simple_time /= num_launches;
std::cout << "Average global kernel time: " << average_simple_time << "ms" << std::endl;
matrix_transpose_shared<<<num_blocks,block_size>>>(d_a, d_c, n);
cudaMemcpy(h_res, d_c, sizeof(float)*n*n, cudaMemcpyDeviceToHost);
equal = matrixEqual(h_res, h_s, n, n);
if(!equal){
return 0;
}
float average_tiled_time = 0;
for(int i = 0; i < num_launches; ++i){
cudaEventRecord(launch_begin,0);
matrix_transpose_shared<<<num_blocks,block_size>>>(d_a, d_c, n);
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_tiled_time += time;
}
average_tiled_time /= num_launches;
std::cout << "Average shared kernel time: " << average_tiled_time << "ms" << std::endl;
float mem_size = sizeof(float) * n * n;
float num_ops = 2 * mem_size;
float seq_throughput = num_ops / average_seq_time / 1000000000.0f;
float simple_throughput = num_ops / (average_simple_time / 1000.0f) / 1000000000.0f;
float tiled_throughput = num_ops / (average_tiled_time / 1000.0f) / 1000000000.0f;
std::cout << "Throughput of sequential implementation: " << seq_throughput << "GB/s" << std::endl;
std::cout << "Throughput of global kernel: " << simple_throughput << "GB/s" << std::endl;
std::cout << "Throughput of shared kernel: " << tiled_throughput << "GB/s" << std::endl;
std::cout << "Performance speedup: global over sequential " << simple_throughput / seq_throughput << "x" << std::endl;
std::cout << "Performance speedup: shared over sequential " << tiled_throughput / seq_throughput << "x" << std::endl;
std::cout << "Performance speedup: shared over global " << tiled_throughput / simple_throughput << "x" << std::endl;
std::cout << "" << std::endl;
// destroy the CUDA events
cudaEventDestroy(launch_begin);
cudaEventDestroy(launch_end);
// deallocate device memory
cudaFree(d_a);
cudaFree(d_c);
free(h_a);
free(h_s);
free(h_res);
}
return 0;
}
|
414b0c83214287bcd77f7ae6bbab50cc6bec602c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void log_motion_estimation_cuda(uint8 *current, uint8 *previous, int *vectors_x, int *vectors_y, int *M_B, int *N_B, int *B, int *M, int *N) {
//obtain idx;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= ((*M_B) * (*N_B))) return;
int x, y;
x = id / (*M_B);
y = id % (*M_B);
int dd;
for (dd = 4; dd > 1; dd--) { //--> gives 4 3 2
int step = 0;
if (dd == 4) {
//d=4;
step = 4;
} else if (dd == 3) {
step = 2;
} else if (dd == 2) {
step = 1;
} else {
continue;
}
int min = 255 * (*B) * (*B);
int bestx, besty, i, j, k, l;
for (i = -step; i < step + 1; i += step) /* For all candidate blocks */
for (j = -step; j < step + 1; j += step) {
int dist = 0;
for (k = 0; k < (*B); k++) /* For all pixels in the block */
for (l = 0; l < (*B); l++) {
int tmp9 = vectors_x[x * (*M_B) + y];
int p1, p2;
p1 = current[((*B) * x + k) * (*M) + (*B) * y + l];
if (((*B) * x + tmp9 + i + k) < 0 || ((*B) * x + tmp9 + i + k) > ((*N) - 1) ||
((*B) * y + tmp9 + j + l) < 0 || ((*B) * y + tmp9 + j + l) > ((*M) - 1)) {
p2 = 0;
} else {
p2 = previous[((*B) * x + tmp9 + i + k) * (*M) + (*B) * y + tmp9 + j + l];
}
dist += abs(p1 - p2);
}
if (dist < min) {
min = dist;
bestx = i;
besty = j;
}
}
int at = x * (*M_B) + y;
vectors_x[at] += bestx;
vectors_y[at] += besty;
}
} | 414b0c83214287bcd77f7ae6bbab50cc6bec602c.cu | #include "includes.h"
__global__ void log_motion_estimation_cuda(uint8 *current, uint8 *previous, int *vectors_x, int *vectors_y, int *M_B, int *N_B, int *B, int *M, int *N) {
//obtain idx;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= ((*M_B) * (*N_B))) return;
int x, y;
x = id / (*M_B);
y = id % (*M_B);
int dd;
for (dd = 4; dd > 1; dd--) { //--> gives 4 3 2
int step = 0;
if (dd == 4) {
//d=4;
step = 4;
} else if (dd == 3) {
step = 2;
} else if (dd == 2) {
step = 1;
} else {
continue;
}
int min = 255 * (*B) * (*B);
int bestx, besty, i, j, k, l;
for (i = -step; i < step + 1; i += step) /* For all candidate blocks */
for (j = -step; j < step + 1; j += step) {
int dist = 0;
for (k = 0; k < (*B); k++) /* For all pixels in the block */
for (l = 0; l < (*B); l++) {
int tmp9 = vectors_x[x * (*M_B) + y];
int p1, p2;
p1 = current[((*B) * x + k) * (*M) + (*B) * y + l];
if (((*B) * x + tmp9 + i + k) < 0 || ((*B) * x + tmp9 + i + k) > ((*N) - 1) ||
((*B) * y + tmp9 + j + l) < 0 || ((*B) * y + tmp9 + j + l) > ((*M) - 1)) {
p2 = 0;
} else {
p2 = previous[((*B) * x + tmp9 + i + k) * (*M) + (*B) * y + tmp9 + j + l];
}
dist += abs(p1 - p2);
}
if (dist < min) {
min = dist;
bestx = i;
besty = j;
}
}
int at = x * (*M_B) + y;
vectors_x[at] += bestx;
vectors_y[at] += besty;
}
} |
62e643e3cee9ab0fe43a5356c26a9ec1b6b97016.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void fm_order2_dgrad_kernel(const float* in, const float* top_grad, float* dgrad, int batch_size, int slot_num, int emb_vec_size) {
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid < emb_vec_size && bid < batch_size) {
float emb_sum = 0.0f;
int offset = bid * slot_num * emb_vec_size + tid;
for (int i = 0; i < slot_num; i++) {
int index = offset + i * emb_vec_size;
emb_sum += in[index];
}
float tgrad = top_grad[bid * emb_vec_size + tid];
for (int i = 0; i < slot_num; i++) {
int index = offset + i * emb_vec_size;
dgrad[index] = tgrad * (emb_sum - in[index]);
}
}
} | 62e643e3cee9ab0fe43a5356c26a9ec1b6b97016.cu | #include "includes.h"
__global__ void fm_order2_dgrad_kernel(const float* in, const float* top_grad, float* dgrad, int batch_size, int slot_num, int emb_vec_size) {
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid < emb_vec_size && bid < batch_size) {
float emb_sum = 0.0f;
int offset = bid * slot_num * emb_vec_size + tid;
for (int i = 0; i < slot_num; i++) {
int index = offset + i * emb_vec_size;
emb_sum += in[index];
}
float tgrad = top_grad[bid * emb_vec_size + tid];
for (int i = 0; i < slot_num; i++) {
int index = offset + i * emb_vec_size;
dgrad[index] = tgrad * (emb_sum - in[index]);
}
}
} |
92c432bb8ce21b6d7d1c6503acd2202051598f39.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "big_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
unsigned int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
big_add), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
big_add), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
big_add), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 92c432bb8ce21b6d7d1c6503acd2202051598f39.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "big_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
unsigned int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
big_add<<<gridBlock,threadBlock>>>(a,b,c,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
big_add<<<gridBlock,threadBlock>>>(a,b,c,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
big_add<<<gridBlock,threadBlock>>>(a,b,c,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7b262c199418ff7eab16892ef60f534c09759b73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ctranslate2/ops/bias_add.h"
#include "type_dispatch.h"
#include "cuda/helpers.h"
namespace ctranslate2 {
namespace ops {
template <typename T, typename AddFunc, typename Epilogue>
__global__ void bias_add_kernel(const T* value,
const T* bias,
T* output,
cuda::index_t depth,
const AddFunc& add_func,
const Epilogue& epilogue) {
const cuda::index_t i = blockIdx.x;
for (cuda::index_t j = threadIdx.x; j < depth; j += blockDim.x) {
const cuda::index_t index = i * depth + j;
output[index] = epilogue(add_func(value[index], bias[j]));
}
}
template <Device D, typename T>
void BiasAdd::compute(const StorageView& value,
const StorageView& bias,
StorageView& output) const {
const dim_t depth = bias.size();
const dim_t batch_size = value.size() / depth;
const dim_t blocks = ::min(batch_size, cuda::max_blocks);
const dim_t threads = ::min(depth, cuda::max_threads);
using DeviceT = cuda::device_type<T>;
const auto* x = cuda::device_cast(value.data<T>());
const auto* b = cuda::device_cast(bias.data<T>());
auto* y = cuda::device_cast(output.data<T>());
if (!_activation_type) {
hipLaunchKernelGGL(( bias_add_kernel), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(),
x, b, y, depth, cuda::plus<DeviceT>(), thrust::identity<DeviceT>());
} else {
switch (*_activation_type) {
case ActivationType::ReLU:
hipLaunchKernelGGL(( bias_add_kernel), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(),
x, b, y, depth, cuda::plus<DeviceT>(), cuda::relu_func<DeviceT>());
break;
case ActivationType::GELU:
hipLaunchKernelGGL(( bias_add_kernel), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(),
x, b, y, depth, cuda::plus<DeviceT>(), cuda::gelu_func<DeviceT>());
break;
}
}
}
#define DECLARE_IMPL(T) \
template void \
BiasAdd::compute<Device::CUDA, T>(const StorageView& value, \
const StorageView& bias, \
StorageView& output) const;
DECLARE_IMPL(float)
DECLARE_IMPL(float16_t)
}
}
| 7b262c199418ff7eab16892ef60f534c09759b73.cu | #include "ctranslate2/ops/bias_add.h"
#include "type_dispatch.h"
#include "cuda/helpers.h"
namespace ctranslate2 {
namespace ops {
template <typename T, typename AddFunc, typename Epilogue>
__global__ void bias_add_kernel(const T* value,
const T* bias,
T* output,
cuda::index_t depth,
const AddFunc& add_func,
const Epilogue& epilogue) {
const cuda::index_t i = blockIdx.x;
for (cuda::index_t j = threadIdx.x; j < depth; j += blockDim.x) {
const cuda::index_t index = i * depth + j;
output[index] = epilogue(add_func(value[index], bias[j]));
}
}
template <Device D, typename T>
void BiasAdd::compute(const StorageView& value,
const StorageView& bias,
StorageView& output) const {
const dim_t depth = bias.size();
const dim_t batch_size = value.size() / depth;
const dim_t blocks = std::min(batch_size, cuda::max_blocks);
const dim_t threads = std::min(depth, cuda::max_threads);
using DeviceT = cuda::device_type<T>;
const auto* x = cuda::device_cast(value.data<T>());
const auto* b = cuda::device_cast(bias.data<T>());
auto* y = cuda::device_cast(output.data<T>());
if (!_activation_type) {
bias_add_kernel<<<blocks, threads, 0, cuda::get_cuda_stream()>>>(
x, b, y, depth, cuda::plus<DeviceT>(), thrust::identity<DeviceT>());
} else {
switch (*_activation_type) {
case ActivationType::ReLU:
bias_add_kernel<<<blocks, threads, 0, cuda::get_cuda_stream()>>>(
x, b, y, depth, cuda::plus<DeviceT>(), cuda::relu_func<DeviceT>());
break;
case ActivationType::GELU:
bias_add_kernel<<<blocks, threads, 0, cuda::get_cuda_stream()>>>(
x, b, y, depth, cuda::plus<DeviceT>(), cuda::gelu_func<DeviceT>());
break;
}
}
}
#define DECLARE_IMPL(T) \
template void \
BiasAdd::compute<Device::CUDA, T>(const StorageView& value, \
const StorageView& bias, \
StorageView& output) const;
DECLARE_IMPL(float)
DECLARE_IMPL(float16_t)
}
}
|
e2026da1dd60a1a1e2584fbdac367e59fe94b1d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ hiprandState_t randomStates[256];
__global__ void Sign_V(const float* a, float* out, const int n)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int i = blockId * blockDim.x + threadIdx.x;
if (i < n)
{
out[i] = copysignf(1.0f, a[i]);
}
} | e2026da1dd60a1a1e2584fbdac367e59fe94b1d2.cu | #include "includes.h"
__device__ curandState randomStates[256];
__global__ void Sign_V(const float* a, float* out, const int n)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int i = blockId * blockDim.x + threadIdx.x;
if (i < n)
{
out[i] = copysignf(1.0f, a[i]);
}
} |
d78503ad66954cf629eaa98ca852279a8f7879c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 5
void fillMatrix(float *h_A, int size);
void mult(float *h_B, float *h_C, float *h_A, int n);
void printVector(float *h_A, int size);
int main(int argc, char const *argv[]) {
// Input matrix h_B and input vector h_C
float *h_B = (float *) malloc(N * N * sizeof(float));
float *h_C = (float *) malloc(N * sizeof(float));
// Result vector h_A
float *h_A = (float *) malloc(N * sizeof(float));
// Fill vector h_C and matrix h_B
fillMatrix(h_C, N);
fillMatrix(h_B, N * N);
// Save dot product between h_B and h_C in h_A
mult(h_B, h_C, h_A, N);
// Print the result vector
printVector(h_A, N);
//Free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
__global__
void dotProduct(float *d_B, float *d_C, float *d_A, int n)
{
float temp = 0;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < n)
{
for (int j = 0; j < n; j++)
temp = temp + (d_B[i * n + j] * d_C[j]);
d_A[i] = temp;
}
}
void fillMatrix(float *h_A, int size)
{
for (int i = 0; i < size; i++)
h_A[i] = i + 1;
}
// Compute h_A = h_B * h_C on the GPU, where h_B is an n x n row-major
// matrix and h_C / h_A are vectors of length n.
// Allocates device buffers, copies the inputs, launches one thread per
// output row, copies the result back, and frees device memory.
// On any CUDA/HIP runtime error the process exits with a diagnostic.
void mult(float *h_B, float *h_C, float *h_A, int n)
{
    // Byte sizes derived from the runtime dimension n. (The original used
    // the compile-time macro N, so the function silently broke for any
    // caller passing a different size.)
    int B_size = n * n * sizeof(float);
    int C_size = n * sizeof(float);
    // Device-side buffers.
    float *d_A, *d_B, *d_C;
    hipError_t err = hipMalloc((void **) &d_B, B_size);
    if (err != hipSuccess)
    {
        printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Check the transfers too: the original ignored their return codes, so
    // a failed copy would silently yield garbage output.
    err = hipMemcpy(d_B, h_B, B_size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = hipMalloc((void **) &d_C, C_size);
    if (err != hipSuccess)
    {
        printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_C, h_C, C_size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = hipMalloc((void **) &d_A, C_size);
    if (err != hipSuccess)
    {
        // Fixed: this message was missing its trailing newline.
        printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Launch ceil(n / 256) blocks of 256 threads: one thread per row.
    hipLaunchKernelGGL(( dotProduct), dim3(ceil(n / 256.0)), dim3(256), 0, 0, d_B, d_C, d_A, n);
    // Kernel launches do not return an error directly; query it explicitly
    // to catch bad launch configurations.
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Blocking device-to-host copy; also synchronizes with the kernel.
    err = hipMemcpy(h_A, d_A, C_size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Free device memory
    hipFree(d_A);
    hipFree(d_C);
    hipFree(d_B);
}
// Print each entry of the vector truncated to an integer, space-separated
// on one line, terminated by a newline.
void printVector(float *h_A, int size)
{
    for (int idx = 0; idx < size; ++idx)
    {
        int truncated = (int)h_A[idx];
        printf("%d ", truncated);
    }
    printf("\n");
}
| d78503ad66954cf629eaa98ca852279a8f7879c5.cu | #include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 5
void fillMatrix(float *h_A, int size);
void mult(float *h_B, float *h_C, float *h_A, int n);
void printVector(float *h_A, int size);
int main(int argc, char const *argv[]) {
// Input matrix h_B and input vector h_C
float *h_B = (float *) malloc(N * N * sizeof(float));
float *h_C = (float *) malloc(N * sizeof(float));
// Result vector h_A
float *h_A = (float *) malloc(N * sizeof(float));
// Fill vector h_C and matrix h_B
fillMatrix(h_C, N);
fillMatrix(h_B, N * N);
// Save dot product between h_B and h_C in h_A
mult(h_B, h_C, h_A, N);
// Print the result vector
printVector(h_A, N);
//Free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
// Kernel: one thread per output row. Row i of the n x n row-major matrix
// d_B is dotted with the vector d_C; the scalar result lands in d_A[i].
__global__
void dotProduct(float *d_B, float *d_C, float *d_A, int n)
{
    float temp = 0;
    // Flat global thread index; also the matrix row this thread handles.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard for the partial block at the tail of the grid.
    if (i < n)
    {
        for (int j = 0; j < n; j++)
            temp = temp + (d_B[i * n + j] * d_C[j]);
        d_A[i] = temp;
    }
}
// Populate an array of `size` floats with the sequence 1, 2, ..., size.
void fillMatrix(float *h_A, int size)
{
    for (int i = 0; i < size; i++)
        h_A[i] = i + 1;
}
// Host wrapper: h_A = h_B (N x N) * h_C (N) computed on the GPU.
// NOTE(review): the buffer sizes use the macro N rather than the parameter
// n, so this only works when the caller passes n == N; the memcpy and
// kernel-launch return codes are also unchecked — worth tightening up.
void mult(float *h_B, float *h_C, float *h_A, int n)
{
    // Define sizes of matrix and vectors in device memory
    int B_size = N * N * sizeof(float);
    int C_size = N * sizeof(float);
    // Create device arrays
    float *d_A, *d_B, *d_C;
    // Allocate device memory for A, B, and C
    // copy h_B and h_C to device memory
    cudaError_t err = cudaMalloc((void **) &d_B, B_size);
    if (err != cudaSuccess)
    {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    cudaMemcpy(d_B, h_B, B_size, cudaMemcpyHostToDevice);
    err = cudaMalloc((void **) &d_C, C_size);
    if (err != cudaSuccess)
    {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    cudaMemcpy(d_C, h_C, C_size, cudaMemcpyHostToDevice);
    err = cudaMalloc((void **) &d_A, C_size);
    if (err != cudaSuccess)
    {
        // NOTE(review): this message is missing its trailing newline.
        printf("%s in %s at line %d", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Launch kernel for each row: ceil(n / 256) blocks of 256 threads.
    dotProduct<<<ceil(n / 256.0), 256>>>(d_B, d_C, d_A, n);
    // Copy the result vector from devive to host; this blocking copy also
    // synchronizes with the kernel above.
    cudaMemcpy(h_A, d_A, C_size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_C);
    cudaFree(d_B);
}
// Print each entry truncated to int, space-separated, newline-terminated.
void printVector(float *h_A, int size)
{
    for (int i = 0; i < size; i++)
        printf("%d ", (int)h_A[i]);
    printf("\n");
}
|
782e04472a89e3557cf1003c61a86e68df3c39e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <iostream>
#include <fstream>
#include "matrix.h"
int main()
{
    // Read an integer matrix from m.txt, compute m + m via the overloaded
    // operator+, and write the doubled matrix to m_double.txt.
    matrix<int> m;
    int rown, coln;
#define LOCAL_PATH "E:\\urfu\\parallel-computing\\cuda-matrix-sum\\Debug\\"
    std::ifstream fin( LOCAL_PATH "m.txt" );
    std::ofstream fout( LOCAL_PATH "m_double.txt" );
    // Fail fast on a missing/unreadable input file instead of reading
    // garbage dimensions from an unopened stream.
    if (!fin)
    {
        std::cerr << "cannot open " << LOCAL_PATH "m.txt" << std::endl;
        return 1;
    }
    fin >> rown >> coln;
    if (!fin || rown <= 0 || coln <= 0)
    {
        std::cerr << "invalid matrix dimensions in m.txt" << std::endl;
        return 1;
    }
    m.resize(rown, coln);
    for (int i = 0; i < rown; ++i)
    {
        for (int j = 0; j < coln; ++j)
            fin >> m[i][j];
    }
    matrix<int> m_sum = m + m;
    m_sum.info();
    // Write the result, space-separated, one row per line.
    for (int i = 0; i < rown; ++i)
    {
        for (int j = 0; j < coln; ++j)
        {
            fout << m_sum[i][j] << ' ';
        }
        fout << std::endl;
    }
    return 0;
}
| 782e04472a89e3557cf1003c61a86e68df3c39e9.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <iostream>
#include <fstream>
#include "matrix.h"
int main()
{
    // Read the matrix dimensions and values from the input file.
    matrix<int> m;
    int rown, coln;
#define LOCAL_PATH "E:\\urfu\\parallel-computing\\cuda-matrix-sum\\Debug\\"
    std::ifstream fin( LOCAL_PATH "m.txt" );
    std::ofstream fout( LOCAL_PATH "m_double.txt" );
    // NOTE(review): no check that fin opened; a missing file leaves rown
    // and coln uninitialized — confirm the file is guaranteed to exist.
    fin >> rown >> coln;
    m.resize(rown, coln);
    for (int i = 0; i < rown; ++i)
    {
        for (int j = 0; j < coln; ++j)
            fin >> m[i][j];
    }
    // Double the matrix via the overloaded operator+ (defined in matrix.h).
    matrix<int> m_sum = m + m;
    m_sum.info();
    // Write the result, space-separated, one row per line.
    for (int i = 0; i < rown; ++i)
    {
        for (int j = 0; j < coln; ++j)
        {
            fout << m_sum[i][j] << ' ';
        }
        fout << std::endl;
    }
    return 0;
}
|
4f524cb51324ae0656194c8e8e665d5ac8d07913.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __NVCC__
template <class U>
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K);
template <class T,class U>
__global__ void A_star_expand(int* off, int* edge, T* W, U* Hx, int* parent, volatile U* Cx,
int* expandNodes, int* expandNodes_size, int* lock , int* flagfound, int* openList, int* nVFlag,
int N, int E, int K, int dest,
int flagDiff, int dE,
int* diff_off, int* diff_edge, unsigned int* diff_weight );
template <class U>
__global__ void keepHeapPQ(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int N,int K);
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N);
template <class U>
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList);
template <class U>
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K);
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE);
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE);
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE);
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight);
template <class U>
__global__ void insertDest(int* PQ_size,U* Cx,int dest,int* openList);
template <class U>
__global__ void getCx(U* Cx,int dest,U* val);
///////////////////////////////////////////
#include "kernels/d_a_star_kernels.cu"
// #include "d_a_star.cuh"
#ifdef DEBUG
#include <cstdio>
#endif
template <class T,class U>
GPU_D_A_Star<T,U>:: GPU_D_A_Star(GPU_Dynamic_Graph<T> *graph, unsigned int start,unsigned int end, unsigned int K )
{
this->graph = graph;
this->start_node = start;
this->end_node = end;
this->num_pq = K;
this->flag_end = 0;
this->flag_found = 0;
this->is_set_hx = false;
this->next_vertices_size = 0;
this->num_threads = 512;
this->num_updates = 0;
this->update_file = NULL;
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: set_heuristics(U* hx)
{
this->Hx = hx;
is_set_hx = true;
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( hipMalloc(&d_Hx,sizeof(U)*N ) );
gpuErrchk ( hipMemcpy(d_Hx,Hx,sizeof(U)*N,hipMemcpyHostToDevice) );
__alloc_cpu();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_cpu()
{
int N = this->graph->get_graph().get_num_nodes();
int K = this->num_pq;
this->PQ = (unsigned int*)malloc(sizeof(unsigned int)*N );
this->PQ_size = (unsigned int*)malloc(sizeof(unsigned int)*K);
this->Cx = (U*)malloc(sizeof(U)*N);
this->open_list = (int*)malloc(sizeof(int)*N);
this->parent = (int*)malloc(sizeof(int)*N);
this->parent_old = (int*)malloc(sizeof(int)*N);
this->next_vertices_flag = (int*)malloc(sizeof(int)*N);
this->next_vertices = (int*)malloc(sizeof(int)*N);
memset(this->parent,-1,sizeof(int)*N);
memset(this->parent_old,-1,sizeof(int)*N);
memset(this->open_list,-1,sizeof(int)*N);
memset(this->PQ_size,0,sizeof(int)*K);
memset(this->next_vertices_flag,-1,sizeof(int)*N);
//todo make it memset
for(int i=0;i<N;i++){
this->Cx[i] = INT_MAX;
}
//append start node
//insert startNode in PQ[0]
Cx[start_node]=Hx[start_node];
PQ[0]=start_node;
PQ_size[0]=1;
open_list[start_node]=0;
//allocate on GPU
__alloc_gpu();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_gpu()
{
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( hipMalloc(&d_Cx,sizeof(U)*N ) );
gpuErrchk ( hipMalloc(&d_parent,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_parent_old,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_open_list,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_PQ,sizeof(unsigned int)*N ) );
gpuErrchk ( hipMalloc(&d_PQ_size,sizeof(unsigned int)*num_pq ) );
gpuErrchk ( hipMalloc(&d_lock,sizeof(int)*N) );
//for next set of vertices to add in PQ
gpuErrchk ( hipMalloc(&d_next_vertices,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&d_next_vertices_size,sizeof(int)) );
gpuErrchk ( hipMalloc(&d_next_vertices_flag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( hipMalloc(&d_expand_nodes,sizeof(int)*num_pq) ); //changed to K
gpuErrchk ( hipMalloc(&d_expand_nodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( hipMalloc(&d_flag_end,sizeof(int)) );
gpuErrchk( hipMalloc(&d_flag_found,sizeof(int)) );
gpuErrchk ( hipMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( hipMemset(d_expand_nodes_size,0,sizeof(int)) );
gpuErrchk ( hipMemset(d_lock,0,sizeof(int)*N) );
//copy from cpu to gpu
gpuErrchk ( hipMemcpy(d_Cx,Cx,sizeof(U)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_PQ_size,PQ_size,sizeof(unsigned int)*num_pq,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_parent,parent,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_open_list,open_list,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_flag_end,&flag_end,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_flag_found,&flag_found,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_next_vertices_flag,next_vertices_flag,sizeof(int)*N,hipMemcpyHostToDevice) );
}
// Launch the extractMin kernel over the num_pq priority queues — presumably
// each queue pops its best candidate into the expand list (one GPU thread
// per queue); see the extractMin declaration for the exact contract.
template <class T,class U>
void GPU_D_A_Star<T,U>:: extract_min()
{
    //K parallel: one thread services one priority queue.
    int N = this->graph->get_graph().get_num_nodes();
    // Ceil-divide so every queue gets a thread even when num_pq is not a
    // multiple of num_threads.
    int num_blocks = (num_pq+num_threads-1)/num_threads;
    hipLaunchKernelGGL(( extractMin < U >) , dim3(num_blocks),dim3(num_threads) , 0, 0, d_PQ, d_PQ_size, d_expand_nodes, d_expand_nodes_size, d_Cx, d_open_list, N, num_pq);
    // Peek (not get) so the sticky error state is preserved for callers.
    gpuErrchk(hipPeekAtLastError() );
    // Block until the kernel finishes; subsequent host steps read its output.
    hipDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: expand()
{
int N = this->graph->get_graph().get_num_nodes();
int E = this->graph->get_graph().get_num_edges();
int num_blocks = (num_pq+num_threads-1)/num_threads;
hipLaunchKernelGGL(( A_star_expand < T, U >) , dim3(num_blocks),dim3(num_threads), 0, 0,
this->graph->get_graph().get_offsets(), this->graph->get_graph().get_edges(), this->graph->get_graph().get_weights(),
d_Hx, d_parent, d_Cx,
d_expand_nodes,d_expand_nodes_size,
d_lock ,d_flag_found,d_open_list,d_next_vertices_flag,
N, E, num_pq, end_node,
false,0,
this->graph->get_diff_graph().get_offsets(),this->graph->get_diff_graph().get_edges(),this->graph->get_diff_graph().get_weights()
);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: maintain_heap()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (num_pq+num_threads-1)/num_threads;
hipLaunchKernelGGL(( keepHeapPQ < U >) , dim3(num_blocks), dim3(num_threads), 0, 0, d_PQ, d_PQ_size, d_Cx, N, num_pq);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: set_flags()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (N+num_threads-1)/num_threads; //N_num_blocks
hipLaunchKernelGGL(( setNV) , dim3(num_blocks),dim3(num_threads), 0, 0, d_next_vertices_flag, d_next_vertices, d_next_vertices_size, N);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: insert()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (num_pq+num_threads-1)/num_threads;
hipLaunchKernelGGL(( insertPQ< U >) , dim3(num_blocks), dim3(num_threads) , 0, 0, d_PQ, d_PQ_size, d_next_vertices, d_next_vertices_size, d_Cx, num_pq, N, d_open_list);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: check_all_min_pq()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (num_pq+num_threads-1)/num_threads;
hipLaunchKernelGGL(( checkMIN < U >) , dim3(num_blocks), dim3(num_threads) , 0, 0, d_PQ, d_PQ_size, d_flag_end, d_Cx, end_node, N, num_pq);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
}
// True when every one of the num_pq priority queues is empty, judged from
// the host-side copy of the per-queue sizes (PQ_size).
template <class T, class U>
bool GPU_D_A_Star<T,U>:: is_empty_pq_cpu()
{
    for (int q = 0; q < num_pq; q++)
    {
        if (PQ_size[q] > 0)
            return false;   // at least one queue still holds work
    }
    return true;
}
template <class T, class U>
std::vector<int> GPU_D_A_Star<T,U>:: initial_path()
{
if(!is_set_hx){
printf("[INFO] heuristics value not set\n");
printf("[DO] set heuristics to init algorithm\n");
exit(0);
}
int N = this->graph->get_graph().get_num_nodes();
while(flag_end == 0 && !is_empty_pq_cpu())
{
extract_min();
expand();
maintain_heap();
set_flags();
insert();
//copy
gpuErrchk( hipMemcpy(&flag_found, d_flag_found, sizeof(int), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(PQ_size, d_PQ_size, sizeof(int)*num_pq, hipMemcpyDeviceToHost) );
//reset
gpuErrchk( hipMemcpy(d_next_vertices_flag, next_vertices_flag, sizeof(int)*N,hipMemcpyHostToDevice) );
//reset next insert array
gpuErrchk ( hipMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( hipMemset(d_expand_nodes_size,0,sizeof(int)) );
if( !is_empty_pq_cpu() && flag_found==1)
{
gpuErrchk( hipMemcpy(d_flag_end, &flag_found,sizeof(int),hipMemcpyHostToDevice) );
check_all_min_pq();
gpuErrchk( hipMemcpy(&flag_end,d_flag_end, sizeof(int),hipMemcpyDeviceToHost) );
}
}
gpuErrchk( hipMemcpy(parent, d_parent, sizeof(int)*N,hipMemcpyDeviceToHost) );
U dest_cost;
gpuErrchk( hipMemcpy(&dest_cost,d_Cx+end_node, sizeof(U),hipMemcpyDeviceToHost) );
std::vector<int> Path;
if(dest_cost != INT_MAX){
int p = this->end_node;
while(parent[p]!=-1){
Path.push_back(p);
p = parent[p];
}
Path.push_back(p);
}
std::reverse(Path.begin(),Path.end());
return Path;
}
template <class T, class U>
std::vector<int> GPU_D_A_Star<T,U>:: get_path()
{
std::vector<int> path;
if(num_updates==0){
path = initial_path();
num_updates++;
}
else
{
//check fo update file
//call dynamic prop
}
return path;
}
template <class T, class U>
std::vector<int> GPU_D_A_Star<T,U>:: updated_path()
{
std::vector<int> path;
return path;
}
template <class T, class U>
void GPU_D_A_Star<T,U>:: set_update_file(FILE* fptr)
{
this->update_file = fptr;
}
#endif | 4f524cb51324ae0656194c8e8e665d5ac8d07913.cu | #ifdef __NVCC__
template <class U>
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K);
template <class T,class U>
__global__ void A_star_expand(int* off, int* edge, T* W, U* Hx, int* parent, volatile U* Cx,
int* expandNodes, int* expandNodes_size, int* lock , int* flagfound, int* openList, int* nVFlag,
int N, int E, int K, int dest,
int flagDiff, int dE,
int* diff_off, int* diff_edge, unsigned int* diff_weight );
template <class U>
__global__ void keepHeapPQ(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int N,int K);
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N);
template <class U>
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList);
template <class U>
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K);
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE);
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE);
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE);
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight);
template <class U>
__global__ void insertDest(int* PQ_size,U* Cx,int dest,int* openList);
template <class U>
__global__ void getCx(U* Cx,int dest,U* val);
///////////////////////////////////////////
#include "kernels/d_a_star_kernels.cu"
// #include "d_a_star.cuh"
#ifdef DEBUG
#include <cstdio>
#endif
template <class T,class U>
GPU_D_A_Star<T,U>:: GPU_D_A_Star(GPU_Dynamic_Graph<T> *graph, unsigned int start,unsigned int end, unsigned int K )
{
this->graph = graph;
this->start_node = start;
this->end_node = end;
this->num_pq = K;
this->flag_end = 0;
this->flag_found = 0;
this->is_set_hx = false;
this->next_vertices_size = 0;
this->num_threads = 512;
this->num_updates = 0;
this->update_file = NULL;
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: set_heuristics(U* hx)
{
this->Hx = hx;
is_set_hx = true;
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( cudaMalloc(&d_Hx,sizeof(U)*N ) );
gpuErrchk ( cudaMemcpy(d_Hx,Hx,sizeof(U)*N,cudaMemcpyHostToDevice) );
__alloc_cpu();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_cpu()
{
int N = this->graph->get_graph().get_num_nodes();
int K = this->num_pq;
this->PQ = (unsigned int*)malloc(sizeof(unsigned int)*N );
this->PQ_size = (unsigned int*)malloc(sizeof(unsigned int)*K);
this->Cx = (U*)malloc(sizeof(U)*N);
this->open_list = (int*)malloc(sizeof(int)*N);
this->parent = (int*)malloc(sizeof(int)*N);
this->parent_old = (int*)malloc(sizeof(int)*N);
this->next_vertices_flag = (int*)malloc(sizeof(int)*N);
this->next_vertices = (int*)malloc(sizeof(int)*N);
memset(this->parent,-1,sizeof(int)*N);
memset(this->parent_old,-1,sizeof(int)*N);
memset(this->open_list,-1,sizeof(int)*N);
memset(this->PQ_size,0,sizeof(int)*K);
memset(this->next_vertices_flag,-1,sizeof(int)*N);
//todo make it memset
for(int i=0;i<N;i++){
this->Cx[i] = INT_MAX;
}
//append start node
//insert startNode in PQ[0]
Cx[start_node]=Hx[start_node];
PQ[0]=start_node;
PQ_size[0]=1;
open_list[start_node]=0;
//allocate on GPU
__alloc_gpu();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: __alloc_gpu()
{
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( cudaMalloc(&d_Cx,sizeof(U)*N ) );
gpuErrchk ( cudaMalloc(&d_parent,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_parent_old,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_open_list,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_PQ,sizeof(unsigned int)*N ) );
gpuErrchk ( cudaMalloc(&d_PQ_size,sizeof(unsigned int)*num_pq ) );
gpuErrchk ( cudaMalloc(&d_lock,sizeof(int)*N) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&d_next_vertices,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&d_next_vertices_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&d_next_vertices_flag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&d_expand_nodes,sizeof(int)*num_pq) ); //changed to K
gpuErrchk ( cudaMalloc(&d_expand_nodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&d_flag_end,sizeof(int)) );
gpuErrchk( cudaMalloc(&d_flag_found,sizeof(int)) );
gpuErrchk ( cudaMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( cudaMemset(d_expand_nodes_size,0,sizeof(int)) );
gpuErrchk ( cudaMemset(d_lock,0,sizeof(int)*N) );
//copy from cpu to gpu
gpuErrchk ( cudaMemcpy(d_Cx,Cx,sizeof(U)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_PQ_size,PQ_size,sizeof(unsigned int)*num_pq,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_parent,parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_open_list,open_list,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_flag_end,&flag_end,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_flag_found,&flag_found,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_next_vertices_flag,next_vertices_flag,sizeof(int)*N,cudaMemcpyHostToDevice) );
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: extract_min()
{
//K parallel
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (num_pq+num_threads-1)/num_threads;
extractMin < U > <<< num_blocks,num_threads >>>( d_PQ, d_PQ_size, d_expand_nodes, d_expand_nodes_size, d_Cx, d_open_list, N, num_pq);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: expand()
{
int N = this->graph->get_graph().get_num_nodes();
int E = this->graph->get_graph().get_num_edges();
int num_blocks = (num_pq+num_threads-1)/num_threads;
A_star_expand < T, U > <<<num_blocks,num_threads>>>(
this->graph->get_graph().get_offsets(), this->graph->get_graph().get_edges(), this->graph->get_graph().get_weights(),
d_Hx, d_parent, d_Cx,
d_expand_nodes,d_expand_nodes_size,
d_lock ,d_flag_found,d_open_list,d_next_vertices_flag,
N, E, num_pq, end_node,
false,0,
this->graph->get_diff_graph().get_offsets(),this->graph->get_diff_graph().get_edges(),this->graph->get_diff_graph().get_weights()
);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: maintain_heap()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (num_pq+num_threads-1)/num_threads;
keepHeapPQ < U > <<< num_blocks, num_threads>>>(d_PQ, d_PQ_size, d_Cx, N, num_pq);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: set_flags()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (N+num_threads-1)/num_threads; //N_num_blocks
setNV <<<num_blocks,num_threads>>>(d_next_vertices_flag, d_next_vertices, d_next_vertices_size, N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: insert()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (num_pq+num_threads-1)/num_threads;
insertPQ< U > <<<num_blocks, num_threads >>>(d_PQ, d_PQ_size, d_next_vertices, d_next_vertices_size, d_Cx, num_pq, N, d_open_list);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
}
template <class T,class U>
void GPU_D_A_Star<T,U>:: check_all_min_pq()
{
int N = this->graph->get_graph().get_num_nodes();
int num_blocks = (num_pq+num_threads-1)/num_threads;
checkMIN < U > <<< num_blocks, num_threads >>>(d_PQ, d_PQ_size, d_flag_end, d_Cx, end_node, N, num_pq);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
}
template <class T, class U>
bool GPU_D_A_Star<T,U>:: is_empty_pq_cpu()
{
bool is_not_empty = false;
for(int i=0;i<num_pq;i++){
if(PQ_size[i]>0)
is_not_empty=true;
}
return !is_not_empty;
}
template <class T, class U>
std::vector<int> GPU_D_A_Star<T,U>:: initial_path()
{
if(!is_set_hx){
printf("[INFO] heuristics value not set\n");
printf("[DO] set heuristics to init algorithm\n");
exit(0);
}
int N = this->graph->get_graph().get_num_nodes();
while(flag_end == 0 && !is_empty_pq_cpu())
{
extract_min();
expand();
maintain_heap();
set_flags();
insert();
//copy
gpuErrchk( cudaMemcpy(&flag_found, d_flag_found, sizeof(int), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(PQ_size, d_PQ_size, sizeof(int)*num_pq, cudaMemcpyDeviceToHost) );
//reset
gpuErrchk( cudaMemcpy(d_next_vertices_flag, next_vertices_flag, sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk ( cudaMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( cudaMemset(d_expand_nodes_size,0,sizeof(int)) );
if( !is_empty_pq_cpu() && flag_found==1)
{
gpuErrchk( cudaMemcpy(d_flag_end, &flag_found,sizeof(int),cudaMemcpyHostToDevice) );
check_all_min_pq();
gpuErrchk( cudaMemcpy(&flag_end,d_flag_end, sizeof(int),cudaMemcpyDeviceToHost) );
}
}
gpuErrchk( cudaMemcpy(parent, d_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
U dest_cost;
gpuErrchk( cudaMemcpy(&dest_cost,d_Cx+end_node, sizeof(U),cudaMemcpyDeviceToHost) );
std::vector<int> Path;
if(dest_cost != INT_MAX){
int p = this->end_node;
while(parent[p]!=-1){
Path.push_back(p);
p = parent[p];
}
Path.push_back(p);
}
std::reverse(Path.begin(),Path.end());
return Path;
}
template <class T, class U>
std::vector<int> GPU_D_A_Star<T,U>:: get_path()
{
std::vector<int> path;
if(num_updates==0){
path = initial_path();
num_updates++;
}
else
{
//check fo update file
//call dynamic prop
}
return path;
}
template <class T, class U>
std::vector<int> GPU_D_A_Star<T,U>:: updated_path()
{
std::vector<int> path;
return path;
}
template <class T, class U>
void GPU_D_A_Star<T,U>:: set_update_file(FILE* fptr)
{
this->update_file = fptr;
}
#endif |
67d78a4b11e9becb258b4e0631f155cb8adeb030.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <hip/hip_runtime.h>
using namespace std;
// Shared-memory tile edge used by the cuda_MxMT_* kernels.
const int BLOCK_WIDTH = 8;
// Block dimensions selected at compile time via -DBWX*/-DBWY* macros;
// both default to 4 when no macro is defined.
// Fix: the original declarations omitted the type ("const BLOCK_WX = 32;"),
// which is ill-formed C++ — implicit int is not allowed.
#if BWX32
const int BLOCK_WX = 32;
#elif BWX24
const int BLOCK_WX = 24;
#elif BWX16
const int BLOCK_WX = 16;
#elif BWX8
const int BLOCK_WX = 8;
#else
const int BLOCK_WX = 4;
#endif
#if BWY32
const int BLOCK_WY = 32;
#elif BWY24
const int BLOCK_WY = 24;
#elif BWY16
const int BLOCK_WY = 16;
#elif BWY8
const int BLOCK_WY = 8;
#else
const int BLOCK_WY = 4;
#endif
/**
* Author: Duo Donald Zhao
*/
// Sequential Matrix Multiplication for computing M * M'
// CPU reference: mr = m * m^T for a d x d row-major matrix m.
// mr[i][j] is the dot product of row i and row j of m.
void seq_MxMT(float *mr, float *m, int d){
    for (int row = 0; row < d; row++){
        for (int col = 0; col < d; col++){
            float acc = 0;
            for (int k = 0; k < d; k++)
                acc += m[row * d + k] * m[col * d + k];
            mr[row * d + col] = acc;
        }
    }
}
__global__ void cuda_MxMT_naive(float *d_mr, float *d_m, int d){
int bdx = blockIdx.x, bdy = blockIdx.y;
int tdx = threadIdx.x, tdy = threadIdx.y;
int j = bdx * blockDim.x + tdx;
int i = bdy * blockDim.y + tdy;
if (i < d && j < d){
float sum = 0;
for (int k = 0; k < d; k++){
sum += d_m[i*d+k] * d_m[j*d+k];
}
d_mr[i*d+j] = sum;
}
}
//power 2
__global__ void cuda_MxMT_v001 (float *d_mr, float *d_m, int d){
float rval = 0;
__shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
__shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
int bdx = blockIdx.x, bdy = blockIdx.y;
int tdx = threadIdx.x, tdy = threadIdx.y;
int row = bdx * blockDim.x + tdx;
int col = bdy * blockDim.y + tdy;
int rpos = row * d + col;
for (int bk = 0; bk < gridDim.y; bk++){
b_mr[tdx][tdy] = d_m[(bdx*BLOCK_WIDTH + tdx)*d + bk * BLOCK_WIDTH + tdy]; // (bdx*w+tdx, bk*w+tdy)
b_mc[tdx][tdy] = d_m[(bdy*BLOCK_WIDTH + tdx)*d + bk * BLOCK_WIDTH + tdy]; // (bdy*w+tdx, bk*w+tdy)
__syncthreads();
for (int tk = 0; tk < BLOCK_WIDTH; tk++){
rval += b_mr[tdx][tk] * b_mc[tdy][tk];
}
__syncthreads();
}
d_mr[rpos] = rval;
}
//general
__global__ void cuda_MxMT_v002 (float *d_mr, float *d_m, int d){
float rval = 0;
__shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
__shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
int bdx = blockIdx.x, bdy = blockIdx.y;
int tdx = threadIdx.x, tdy = threadIdx.y;
int row = bdx * blockDim.x + tdx;
int col = bdy * blockDim.y + tdy;
int rpos = row * d + col;
int srcRowR = bdx*BLOCK_WIDTH + tdx;
int srcRowC = bdy*BLOCK_WIDTH + tdx;
for (int bk = 0; bk < gridDim.y; bk++){
int srcCol = bk * BLOCK_WIDTH + tdy;
if (srcRowR < d && srcCol < d)
b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
if (srcRowC < d && srcCol < d)
b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
__syncthreads();
if (row < d && col < d){
for (int tk = 0; tk < BLOCK_WIDTH; tk++){
if (bk*BLOCK_WIDTH + tk < d)
rval += b_mr[tdx][tk] * b_mc[tdy][tk];
}
}
__syncthreads();
}
if (row < d && col < d){
d_mr[rpos] = rval;// + 0.1*row + 0.01*col;
}
}
__global__ void cuda_MxMT_v003 (float *d_mr, float *d_m, int d){
float rval = 0;
__shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
__shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
int bdx = blockIdx.x, bdy = blockIdx.y;
int tdx = threadIdx.x, tdy = threadIdx.y;
int row = bdx * blockDim.x + tdx;
int col = bdy * blockDim.y + tdy;
int rpos = row * d + col;
int srcRowR = bdx*BLOCK_WIDTH + tdx;
int srcRowC = bdy*BLOCK_WIDTH + tdx;
for (int bk = 0; bk < gridDim.y; ++bk){
int srcCol = bk * BLOCK_WIDTH + tdy;
if (srcRowR < d && srcCol < d)
b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
if (srcRowC < d && srcCol < d)
b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
__syncthreads();
if (row < d && col < d){
if (bk != gridDim.y - 1)
for (int tk = 0; tk < BLOCK_WIDTH; tk++)
rval += b_mr[tdx][tk] * b_mc[tdy][tk];
else
for (int tk = 0; tk < d % BLOCK_WIDTH; tk++)
rval += b_mr[tdx][tk] * b_mc[tdy][tk];
}
__syncthreads();
}
if (row < d && col < d){
d_mr[rpos] = rval;// + 0.1*row + 0.01*col;
}
}
__global__ void cuda_MxMT_v004 (float *d_mr, float *d_m, int d){
float rval = 0;
__shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
__shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
int bdx = blockIdx.x, bdy = blockIdx.y;
int tdx = threadIdx.x, tdy = threadIdx.y;
if (bdx > bdy)
return;
int row = bdx * blockDim.x + tdx;
int col = bdy * blockDim.y + tdy;
int rpos = row * d + col;
int rposPair = col * d + row;
int srcRowR = bdx*BLOCK_WIDTH + tdx;
int srcRowC = bdy*BLOCK_WIDTH + tdx;
for (int bk = 0; bk < gridDim.y; ++bk){
int srcCol = bk * BLOCK_WIDTH + tdy;
if (srcRowR < d && srcCol < d)
b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
if (srcRowC < d && srcCol < d)
b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
__syncthreads();
if (row < d && col < d){
if (bk != gridDim.y - 1)
for (int tk = 0; tk < BLOCK_WIDTH; tk++)
rval += b_mr[tdx][tk] * b_mc[tdy][tk];
else
for (int tk = 0; tk < d % BLOCK_WIDTH; tk++)
rval += b_mr[tdx][tk] * b_mc[tdy][tk];
}
__syncthreads();
}
if (row < d && col < d){
d_mr[rposPair] = d_mr[rpos] = rval;// + 0.1*row + 0.01*col;
}
}
// Experimental/benchmark variant of the tiled M*M^T kernel (upper-triangle
// blocks only; each block writes its result and the mirrored position).
// NOTE(review): the load into b_mc is commented out, so the multiply reads
// uninitialized shared memory; rval is also assigned (=) each tile rather
// than accumulated, and only lanes 0..1 of the tile are multiplied. This
// cannot produce a correct product — it looks like a stripped-down timing
// experiment, not production code. Confirm before reusing.
__global__ void cuda_MxMT_v005 (float *d_mr, float *d_m, int d){
    float rval = 0;
    __shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
    __shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
    int bdx = blockIdx.x, bdy = blockIdx.y;
    int tdx = threadIdx.x, tdy = threadIdx.y;
    // Whole blocks below the diagonal exit together, so the __syncthreads()
    // calls below are still reached uniformly within every surviving block.
    if (bdx > bdy)
    return;
    int row = bdx * blockDim.x + tdx;
    int col = bdy * blockDim.y + tdy;
    int rpos = row * d + col;
    int rposPair = col * d + row;
    int srcRowR = bdx*BLOCK_WIDTH + tdx;
    // srcRowC is currently unused (its consumer is commented out below).
    int srcRowC = bdy*BLOCK_WIDTH + tdx;
    for (int bk = 0; bk < gridDim.y; ++bk){
    int srcCol = bk * BLOCK_WIDTH + tdy;
    if (srcRowR < d && srcCol < d)
    b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
    // if (srcRowC < d && srcCol < d)
    // b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
    __syncthreads();
    if (row < d && col < d){
    // if (bk != gridDim.y - 1){
    rval = b_mr[tdx][0] * b_mc[tdy][0]
    + b_mr[tdx][1] * b_mc[tdy][1];
    // + b_mr[tdx][2] * b_mc[tdy][2]
    // + b_mr[tdx][3] * b_mc[tdy][3];
    // + b_mr[tdx][4] * b_mc[tdy][4]
    // + b_mr[tdx][5] * b_mc[tdy][5]
    // + b_mr[tdx][6] * b_mc[tdy][6]
    // + b_mr[tdx][7] * b_mc[tdy][7];
    // }
    // else
    // for (int tk = 0; tk < d % BLOCK_WIDTH; tk++)
    // rval += b_mr[tdx][tk] * b_mc[tdy][tk];
    }
    __syncthreads();
    }
    if (row < d && col < d){
    // Write the (row, col) result and its transpose-symmetric partner.
    d_mr[rposPair] = d_mr[rpos] = rval;// + 0.1*row + 0.01*col;
    }
}
// for CUDA consistency all matrix are represented by 1D
//Random Square Matrix Populator
// Fills the d x d matrix m either with uniform randoms in [0,1]
// (fromRand == true, reseeding from the wall clock on every call) or with
// the deterministic pattern i % 1000 (fromRand == false).
void matrixPopulate(float *m, int d, bool fromRand = true){
    const int total = d * d;
    if (!fromRand){
        // Deterministic fill keeps runs reproducible/comparable.
        for (int idx = 0; idx < total; ++idx)
            m[idx] = idx % 1000;
        return;
    }
    srand(time(0));
    for (int idx = 0; idx < total; ++idx)
        m[idx] = rand() / (float) RAND_MAX;
}
// Matrix Display
// Prints the d x d matrix m to stdout in brace-delimited, row-per-line form
// with every entry right-aligned in a 10-character field.
void diplayMatrix(float *m, int d){
    cout << "m={" << endl;
    for (int i = 0; i < d; i++){
        cout << "\t{";
        for (int j = 0; j < d - 1; j++){
            cout << setw(10) << m[i*d+j] << ", ";
        }
        // Last column printed separately so no trailing comma is emitted;
        // the final row closes without a comma after the brace.
        cout << setw(10) << m[i*d + d - 1] <<
            ((i==d-1)? "}" : "},") << endl;
    }
    cout << "};" << endl;
}
//Matrix Verifier
// Returns the sum of squared element-wise differences between two d x d
// matrices (squared Frobenius distance; no square root is taken).
float MatrixL2Diff(float *mt, float *ms, int d){
    float total = 0;
    const int count = d * d;
    for (int idx = 0; idx < count; ++idx){
        const float delta = mt[idx] - ms[idx];
        total += delta * delta;
    }
    return total;
}
// Driver: builds a deterministic dim x dim matrix, computes M*M^T on the
// CPU (seq_MxMT) and on the GPU, then reports the squared-L2 difference,
// the kernel time in ms and an ops-rate figure, plus some device info.
// NOTE(review): no HIP API / kernel-launch error checking; the timing
// events are never destroyed; and the launched kernel is cuda_MxMT_v005 —
// the broken experimental variant — so a large L2 diff is expected.
int main(int argc, char *argv[]){
    cout << "Hello, Welcome to the world of CUDA Matrix Multiplication!" << endl;
    if (argc < 2){
        cerr << "usage: " << "<program> <matrix dimension>" << endl;
        return -1;
    }
    int dim = atoi(argv[1]);
    cout << "Starting Computing Matrix Multiplication of dimension: " << dim << endl;
    cout << "Initializting Matrix" << endl;
    // Host buffers: source matrix, GPU result, CPU reference.
    float *m = new float [dim*dim];
    float *mr = new float [dim*dim]; //cuda result
    float *mp = new float [dim*dim]; //processor comparison results
    matrixPopulate(m, dim, false); // deterministic fill so runs are comparable
    //copy(m, m + dim*dim, mr); //STL copying
    //diplayMatrix(m, dim);
    // diplayMatrix(mr, dim);
    seq_MxMT(mp, m, dim); // CPU reference result
    //diplayMatrix(mp, dim);
    cout << "Copying Data To GPU" << endl;
    float *d_mr, *d_m;
    hipMalloc((void **) &d_mr, dim*dim*sizeof(float));
    hipMalloc((void **) &d_m, dim *dim*sizeof(float));
    hipMemcpy(d_m, m, dim*dim*sizeof(float), hipMemcpyHostToDevice);
    cout << "Invoking GPU kernel to Compute" << endl;
    // One BLOCK_WIDTH x BLOCK_WIDTH block per output tile (ceiling division).
    dim3 gDim(ceil((float) dim / BLOCK_WIDTH ), ceil((float)dim/BLOCK_WIDTH), 1);
    // dim3 gDim(128, 128, 1);
    dim3 bDim(BLOCK_WIDTH,BLOCK_WIDTH,1);
    float time;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    /**
     * 1024 512 256 32
     */
    // cuda_MxMT_naive<<< dim3(dim/256+1,dim/256+1,1), dim3(1024,1024,1) >>>(d_mr, d_m, dim);
    hipLaunchKernelGGL(( cuda_MxMT_v005), dim3(gDim), dim3(bDim) , 0, 0, d_mr, d_m, dim);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop); // kernel-only elapsed time, ms
    hipDeviceSynchronize();
    cout << "Copying Data back from GPU kernel" << endl;
    hipMemcpy(mr, d_mr, dim*dim*sizeof(float), hipMemcpyDeviceToHost);
    cout << "Displaying Results: " << endl;
    //diplayMatrix(mr, dim);
    hipDeviceSynchronize();
    cout << "L2 diff: " << MatrixL2Diff(mp, mr, dim) << endl;
    cout << "Elapsed time: " << time << endl;
    cout << "GPIS: " << ( 2.0e-6 * dim * dim * dim) /(time) << endl; // 2*dim^3 flops over ms
    delete[] m;
    delete[] mr;
    delete[] mp;
    hipFree(d_mr);
    hipFree(d_m);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);
    cout << "prop.maxThreadsPerBlock: " << prop.maxThreadsPerBlock << endl;
    int count;
    hipGetDeviceCount(&count);
    cout << count << endl;
    return 0;
}
| 67d78a4b11e9becb258b4e0631f155cb8adeb030.cu | #include <iostream>
#include <iomanip>
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <cuda.h>
using namespace std;
const int BLOCK_WIDTH = 8;
// Compile-time selectable block dimensions: defining BWX<n>/BWY<n> picks the
// width/height; both default to 4 when no flag is given.
// Fix: the original declarations omitted the type ("const BLOCK_WX = ...");
// implicit int is not valid C++, so an explicit int is added.
#if BWX32
const int BLOCK_WX = 32;
#elif BWX24
const int BLOCK_WX = 24;
#elif BWX16
const int BLOCK_WX = 16;
#elif BWX8
const int BLOCK_WX = 8;
#else
const int BLOCK_WX = 4;
#endif
#if BWY32
const int BLOCK_WY = 32;
#elif BWY24
const int BLOCK_WY = 24;
#elif BWY16
const int BLOCK_WY = 16;
#elif BWY8
const int BLOCK_WY = 8;
#else
const int BLOCK_WY = 4;
#endif
/**
* Author: Duo Donald Zhao
*/
// Sequential Matrix Multiplication for computing M * M'
// Sequential reference implementation of mr = m * m^T for a d x d row-major
// matrix; m is left untouched and mr receives the d x d product.
void seq_MxMT(float *mr, float *m, int d){
    for (int row = 0; row < d; ++row){
        const float *a = m + row * d;      // row `row` of M
        for (int col = 0; col < d; ++col){
            const float *b = m + col * d;  // row `col` of M == column of M^T
            float acc = 0;
            for (int t = 0; t < d; ++t)
                acc += a[t] * b[t];
            mr[row * d + col] = acc;
        }
    }
}
// One thread per output element of d_mr = d_m * d_m^T; each thread streams
// two full rows of d_m straight from global memory (no shared-memory
// tiling). Threads in the grid's padding region (i or j >= d) do nothing.
__global__ void cuda_MxMT_naive(float *d_mr, float *d_m, int d){
    int bdx = blockIdx.x, bdy = blockIdx.y;
    int tdx = threadIdx.x, tdy = threadIdx.y;
    int j = bdx * blockDim.x + tdx;
    int i = bdy * blockDim.y + tdy;
    if (i < d && j < d){
        float sum = 0;
        // Dot product of rows i and j of M (row j of M == column j of M^T).
        for (int k = 0; k < d; k++){
            sum += d_m[i*d+k] * d_m[j*d+k];
        }
        d_mr[i*d+j] = sum;
    }
}
//power 2
// Shared-memory tiled d_mr = d_m * d_m^T with NO bounds checks anywhere:
// per the "power 2" note preceding it, this version assumes d is an exact
// multiple of BLOCK_WIDTH (otherwise the tile loads index out of range).
__global__ void cuda_MxMT_v001 (float *d_mr, float *d_m, int d){
    float rval = 0;
    __shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH]; // tile of M rows for this block
    __shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH]; // tile of M rows acting as M^T columns
    int bdx = blockIdx.x, bdy = blockIdx.y;
    int tdx = threadIdx.x, tdy = threadIdx.y;
    int row = bdx * blockDim.x + tdx;
    int col = bdy * blockDim.y + tdy;
    int rpos = row * d + col;
    for (int bk = 0; bk < gridDim.y; bk++){
        b_mr[tdx][tdy] = d_m[(bdx*BLOCK_WIDTH + tdx)*d + bk * BLOCK_WIDTH + tdy]; // (bdx*w+tdx, bk*w+tdy)
        b_mc[tdx][tdy] = d_m[(bdy*BLOCK_WIDTH + tdx)*d + bk * BLOCK_WIDTH + tdy]; // (bdy*w+tdx, bk*w+tdy)
        __syncthreads(); // tiles fully staged before accumulation
        for (int tk = 0; tk < BLOCK_WIDTH; tk++){
            rval += b_mr[tdx][tk] * b_mc[tdy][tk];
        }
        __syncthreads(); // protect tiles from being overwritten next iteration
    }
    d_mr[rpos] = rval;
}
//general
// Tiled d_mr = d_m * d_m^T for arbitrary d: tile loads and the accumulation
// are bounds-checked element by element, so partial edge tiles are handled
// (at the cost of a branch inside the inner loop — hoisted out in v003).
__global__ void cuda_MxMT_v002 (float *d_mr, float *d_m, int d){
    float rval = 0;
    __shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
    __shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
    int bdx = blockIdx.x, bdy = blockIdx.y;
    int tdx = threadIdx.x, tdy = threadIdx.y;
    int row = bdx * blockDim.x + tdx;
    int col = bdy * blockDim.y + tdy;
    int rpos = row * d + col;
    int srcRowR = bdx*BLOCK_WIDTH + tdx;
    int srcRowC = bdy*BLOCK_WIDTH + tdx;
    for (int bk = 0; bk < gridDim.y; bk++){
        int srcCol = bk * BLOCK_WIDTH + tdy;
        if (srcRowR < d && srcCol < d)
            b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
        if (srcRowC < d && srcCol < d)
            b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
        __syncthreads();
        if (row < d && col < d){
            for (int tk = 0; tk < BLOCK_WIDTH; tk++){
                // Skip lanes past the matrix edge in the last partial tile.
                if (bk*BLOCK_WIDTH + tk < d)
                    rval += b_mr[tdx][tk] * b_mc[tdy][tk];
            }
        }
        __syncthreads();
    }
    if (row < d && col < d){
        d_mr[rpos] = rval;// + 0.1*row + 0.01*col;
    }
}
// Tiled d_mr = d_m * d_m^T; like v002 but the per-element bounds test is
// hoisted out of the inner product: full tiles run all BLOCK_WIDTH lanes and
// only the final (possibly partial) tile is trimmed.
// Fix: the original trimmed the last tile to d % BLOCK_WIDTH iterations,
// which is 0 when d is an exact multiple of BLOCK_WIDTH, silently dropping
// the last tile's contribution. The limit now falls back to BLOCK_WIDTH in
// that case.
__global__ void cuda_MxMT_v003 (float *d_mr, float *d_m, int d){
    float rval = 0;
    __shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
    __shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
    int bdx = blockIdx.x, bdy = blockIdx.y;
    int tdx = threadIdx.x, tdy = threadIdx.y;
    int row = bdx * blockDim.x + tdx;
    int col = bdy * blockDim.y + tdy;
    int rpos = row * d + col;
    int srcRowR = bdx*BLOCK_WIDTH + tdx;
    int srcRowC = bdy*BLOCK_WIDTH + tdx;
    for (int bk = 0; bk < gridDim.y; ++bk){
        int srcCol = bk * BLOCK_WIDTH + tdy;
        if (srcRowR < d && srcCol < d)
            b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
        if (srcRowC < d && srcCol < d)
            b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
        __syncthreads();
        if (row < d && col < d){
            // Full tiles use all BLOCK_WIDTH lanes; only a genuinely partial
            // last tile is trimmed to the remainder of d.
            int lim = BLOCK_WIDTH;
            if (bk == gridDim.y - 1 && (d % BLOCK_WIDTH) != 0)
                lim = d % BLOCK_WIDTH;
            for (int tk = 0; tk < lim; tk++)
                rval += b_mr[tdx][tk] * b_mc[tdy][tk];
        }
        __syncthreads();
    }
    if (row < d && col < d){
        d_mr[rpos] = rval;
    }
}
// Tiled d_mr = d_m * d_m^T that exploits the symmetry of the result:
// blocks strictly below the diagonal (bdx > bdy) exit immediately, and each
// surviving thread writes its value to both (row,col) and the mirrored
// (col,row) position.
// Fix: the final tile's inner product was trimmed to d % BLOCK_WIDTH
// iterations, which is 0 whenever d is an exact multiple of BLOCK_WIDTH —
// that silently dropped the last tile's entire contribution. The trim now
// applies only when d does not divide evenly.
__global__ void cuda_MxMT_v004 (float *d_mr, float *d_m, int d){
    float rval = 0;
    __shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
    __shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH];
    int bdx = blockIdx.x, bdy = blockIdx.y;
    int tdx = threadIdx.x, tdy = threadIdx.y;
    // Whole block returns together, so the __syncthreads() below stay safe.
    if (bdx > bdy)
        return;
    int row = bdx * blockDim.x + tdx;
    int col = bdy * blockDim.y + tdy;
    int rpos = row * d + col;
    int rposPair = col * d + row;       // transpose-mirror position
    int srcRowR = bdx*BLOCK_WIDTH + tdx;
    int srcRowC = bdy*BLOCK_WIDTH + tdx;
    for (int bk = 0; bk < gridDim.y; ++bk){
        int srcCol = bk * BLOCK_WIDTH + tdy;
        if (srcRowR < d && srcCol < d)
            b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
        if (srcRowC < d && srcCol < d)
            b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
        __syncthreads();
        if (row < d && col < d){
            // Full tiles use all BLOCK_WIDTH lanes; only a genuinely partial
            // last tile is trimmed to the remainder of d.
            int lim = BLOCK_WIDTH;
            if (bk == gridDim.y - 1 && (d % BLOCK_WIDTH) != 0)
                lim = d % BLOCK_WIDTH;
            for (int tk = 0; tk < lim; tk++)
                rval += b_mr[tdx][tk] * b_mc[tdy][tk];
        }
        __syncthreads();
    }
    if (row < d && col < d){
        // Write the element and its mirror in one pass.
        d_mr[rposPair] = d_mr[rpos] = rval;
    }
}
// Scratch/benchmark variant of cuda_MxMT_v004 with the inner product
// manually unrolled.
// NOTE(review): this kernel is NOT functionally correct as committed:
//  - the load of the b_mc tile is commented out, so b_mc is read
//    uninitialized;
//  - rval is assigned (not accumulated) on every tile iteration, keeping
//    only the first two products of the last tile.
// It reads like a throughput experiment; do not use its output for results.
__global__ void cuda_MxMT_v005 (float *d_mr, float *d_m, int d){
    float rval = 0;
    __shared__ float b_mr[BLOCK_WIDTH][BLOCK_WIDTH];
    __shared__ float b_mc[BLOCK_WIDTH][BLOCK_WIDTH]; // never written below (load commented out)
    int bdx = blockIdx.x, bdy = blockIdx.y;
    int tdx = threadIdx.x, tdy = threadIdx.y;
    if (bdx > bdy)
        return;
    int row = bdx * blockDim.x + tdx;
    int col = bdy * blockDim.y + tdy;
    int rpos = row * d + col;
    int rposPair = col * d + row;
    int srcRowR = bdx*BLOCK_WIDTH + tdx;
    int srcRowC = bdy*BLOCK_WIDTH + tdx;
    for (int bk = 0; bk < gridDim.y; ++bk){
        int srcCol = bk * BLOCK_WIDTH + tdy;
        if (srcRowR < d && srcCol < d)
            b_mr[tdx][tdy] = d_m[srcRowR*d + srcCol]; // (bdx*w+tdx, bk*w+tdy)
        // if (srcRowC < d && srcCol < d)
        //     b_mc[tdx][tdy] = d_m[srcRowC*d + srcCol]; // (bdy*w+tdx, bk*w+tdy)
        __syncthreads();
        if (row < d && col < d){
            // if (bk != gridDim.y - 1){
            rval = b_mr[tdx][0] * b_mc[tdy][0]
                 + b_mr[tdx][1] * b_mc[tdy][1];
            //   + b_mr[tdx][2] * b_mc[tdy][2]
            //   + b_mr[tdx][3] * b_mc[tdy][3];
            //   + b_mr[tdx][4] * b_mc[tdy][4]
            //   + b_mr[tdx][5] * b_mc[tdy][5]
            //   + b_mr[tdx][6] * b_mc[tdy][6]
            //   + b_mr[tdx][7] * b_mc[tdy][7];
            // }
            // else
            //     for (int tk = 0; tk < d % BLOCK_WIDTH; tk++)
            //         rval += b_mr[tdx][tk] * b_mc[tdy][tk];
        }
        __syncthreads();
    }
    if (row < d && col < d){
        d_mr[rposPair] = d_mr[rpos] = rval;// + 0.1*row + 0.01*col;
    }
}
// for CUDA consistency all matrix are represented by 1D
//Random Square Matrix Populator
// Fills the d x d matrix m with uniform randoms in [0,1] (fromRand == true,
// reseeding from the wall clock on every call) or with the deterministic
// pattern i % 1000 (fromRand == false).
void matrixPopulate(float *m, int d, bool fromRand = true){
    if (fromRand){
        srand(time(0));
        for (int i = 0; i < d*d; i++){
            m[i] = rand()/(float) RAND_MAX;
        }
    }
    else{
        // Deterministic fill keeps runs reproducible/comparable.
        for (int i = 0; i < d*d; i++){
            m[i] = i % 1000;
        }
    }
}
// Matrix Display
// Prints the d x d matrix m to stdout in brace-delimited, row-per-line form
// with every entry right-aligned in a 10-character field.
void diplayMatrix(float *m, int d){
    cout << "m={" << endl;
    for (int i = 0; i < d; i++){
        cout << "\t{";
        for (int j = 0; j < d - 1; j++){
            cout << setw(10) << m[i*d+j] << ", ";
        }
        // Last column printed separately so no trailing comma is emitted;
        // the final row closes without a comma after the brace.
        cout << setw(10) << m[i*d + d - 1] <<
            ((i==d-1)? "}" : "},") << endl;
    }
    cout << "};" << endl;
}
//Matrix Verifier
// Returns the sum of squared element-wise differences between two d x d
// matrices (squared Frobenius distance; no square root is taken).
float MatrixL2Diff(float *mt, float *ms, int d){
    float sum = 0;
    for (int i = 0; i < d * d; i++){
        float diff = mt[i] - ms[i];
        sum += diff * diff;
    }
    return sum;
}
// Driver: builds a deterministic dim x dim matrix, computes M*M^T on the
// CPU (seq_MxMT) and on the GPU, then reports the squared-L2 difference,
// the kernel time in ms and an ops-rate figure, plus some device info.
// NOTE(review): no CUDA API / kernel-launch error checking; the timing
// events are never destroyed; and the launched kernel is cuda_MxMT_v005 —
// the broken experimental variant — so a large L2 diff is expected.
int main(int argc, char *argv[]){
    cout << "Hello, Welcome to the world of CUDA Matrix Multiplication!" << endl;
    if (argc < 2){
        cerr << "usage: " << "<program> <matrix dimension>" << endl;
        return -1;
    }
    int dim = atoi(argv[1]);
    cout << "Starting Computing Matrix Multiplication of dimension: " << dim << endl;
    cout << "Initializting Matrix" << endl;
    // Host buffers: source matrix, GPU result, CPU reference.
    float *m = new float [dim*dim];
    float *mr = new float [dim*dim]; //cuda result
    float *mp = new float [dim*dim]; //processor comparison results
    matrixPopulate(m, dim, false); // deterministic fill so runs are comparable
    //copy(m, m + dim*dim, mr); //STL copying
    //diplayMatrix(m, dim);
    // diplayMatrix(mr, dim);
    seq_MxMT(mp, m, dim); // CPU reference result
    //diplayMatrix(mp, dim);
    cout << "Copying Data To GPU" << endl;
    float *d_mr, *d_m;
    cudaMalloc((void **) &d_mr, dim*dim*sizeof(float));
    cudaMalloc((void **) &d_m, dim *dim*sizeof(float));
    cudaMemcpy(d_m, m, dim*dim*sizeof(float), cudaMemcpyHostToDevice);
    cout << "Invoking GPU kernel to Compute" << endl;
    // One BLOCK_WIDTH x BLOCK_WIDTH block per output tile (ceiling division).
    dim3 gDim(ceil((float) dim / BLOCK_WIDTH ), ceil((float)dim/BLOCK_WIDTH), 1);
    // dim3 gDim(128, 128, 1);
    dim3 bDim(BLOCK_WIDTH,BLOCK_WIDTH,1);
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    /**
     * 1024 512 256 32
     */
    // cuda_MxMT_naive<<< dim3(dim/256+1,dim/256+1,1), dim3(1024,1024,1) >>>(d_mr, d_m, dim);
    cuda_MxMT_v005<<< gDim, bDim >>>(d_mr, d_m, dim);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop); // kernel-only elapsed time, ms
    cudaDeviceSynchronize();
    cout << "Copying Data back from GPU kernel" << endl;
    cudaMemcpy(mr, d_mr, dim*dim*sizeof(float), cudaMemcpyDeviceToHost);
    cout << "Displaying Results: " << endl;
    //diplayMatrix(mr, dim);
    cudaDeviceSynchronize();
    cout << "L2 diff: " << MatrixL2Diff(mp, mr, dim) << endl;
    cout << "Elapsed time: " << time << endl;
    cout << "GPIS: " << ( 2.0e-6 * dim * dim * dim) /(time) << endl; // 2*dim^3 flops over ms
    delete[] m;
    delete[] mr;
    delete[] mp;
    cudaFree(d_mr);
    cudaFree(d_m);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    cout << "prop.maxThreadsPerBlock: " << prop.maxThreadsPerBlock << endl;
    int count;
    cudaGetDeviceCount(&count);
    cout << count << endl;
    return 0;
}
|
0090ae02b327315f8b780dc431d50094e4a6e7dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "config.h"
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start); \
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
// Pass-through status check: returns its argument unchanged; in DEBUG builds
// a non-success status prints the error string to stderr and exits.
inline hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
    if (result != hipSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
        exit(-1);
    }
#endif
    return result;
}
// Sobel edge detector: each block stages one TILE_SIZE x TILE_SIZE tile of
// the input image plus a 1-pixel halo in shared memory, computes the Sobel
// gradient magnitude at its pixel, and writes 255 (edge) or 0 depending on
// `threshold`. Assumes the buffers are padded to whole tiles by the host
// wrapper, so no per-pixel bounds checks are needed; the row pitch is
// derived as gridDim.x*TILE_SIZE. size_x/size_y are accepted but unused.
// NOTE(review): the halo loads compare tx/ty against BLOCK_TILE_SIZE-1
// while the tile is TILE_SIZE wide — presumably BLOCK_TILE_SIZE ==
// TILE_SIZE in config.h; confirm, otherwise part of the halo is never
// filled.
__global__ void kernel(unsigned char *input,
                       unsigned char *output,
                       unsigned int size_x,
                       unsigned int size_y,
                       int threshold){
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Including border
    __shared__ unsigned char inTile[TILE_SIZE+2][TILE_SIZE+2]; // input
    // Read Input Data into Shared Memory
    /////////////////////////////////////////////////////////////////////////////
    int x = bx*TILE_SIZE+tx;
    int y = by*TILE_SIZE+ty;
    int location = y*(gridDim.x*TILE_SIZE)+x;
    int sharedX = tx + 1; // +1 skips the 1-pixel halo ring
    int sharedY = ty + 1;
    inTile[sharedY][sharedX] = input[location];
    // Read Border Data into Shared Memory (pixels beyond the image edge read as 0)
    /////////////////////////////////////////////////////////////////////////////////////
    int posX;
    int posY;
    // Horizontal Border (top and bottom halo rows)
    if (ty == 0){
        posX = sharedX;
        posY = 0;
        if (by == 0){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[(y-1)*(gridDim.x*TILE_SIZE)+x];
        }
    } else if (ty == BLOCK_TILE_SIZE-1){
        posX = sharedX;
        posY = TILE_SIZE+1;
        if (by == gridDim.y-1){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[(y+1)*(gridDim.x*TILE_SIZE)+x];
        }
    }
    // Vertical Border (left and right halo columns)
    if (tx == 0){
        posX = 0;
        posY = sharedY;
        if (bx == 0){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[y*(gridDim.x*TILE_SIZE)+(x-1)];
        }
    } else if (tx == BLOCK_TILE_SIZE-1){
        posX = TILE_SIZE+1;
        posY = sharedY;
        if (bx == gridDim.x-1){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[y*(gridDim.x*TILE_SIZE)+(x+1)];
        }
    }
    // Corners for Border (one thread fills each of the four halo corners)
    if (tx == 0 && ty == 0){
        if (bx == 0 || by == 0){
            inTile[ 0][ 0] = 0;
        } else {
            inTile[ 0][ 0] = input [(y-1)*(gridDim.x*TILE_SIZE)+(x-1)];
        }
    } else if (tx == 0 && ty == BLOCK_TILE_SIZE-1){
        if (bx == 0 || by == gridDim.y-1){
            inTile[TILE_SIZE+1][ 0] = 0;
        } else {
            inTile[TILE_SIZE+1][ 0] = input [(y+1)*(gridDim.x*TILE_SIZE)+(x-1)];
        }
    } else if (tx == BLOCK_TILE_SIZE-1 && ty == 0){
        if (bx == gridDim.x-1 || by == 0){
            inTile[ 0][TILE_SIZE+1] = 0;
        } else {
            inTile[ 0][TILE_SIZE+1] = input [(y-1)*(gridDim.x*TILE_SIZE)+(x+1)];
        }
    } else if (tx == BLOCK_TILE_SIZE-1 && ty == BLOCK_TILE_SIZE-1){
        if (bx == gridDim.x-1 || by == gridDim.y-1){
            inTile[TILE_SIZE+1][TILE_SIZE+1] = 0;
        } else {
            inTile[TILE_SIZE+1][TILE_SIZE+1] = input [(y+1)*(gridDim.x*TILE_SIZE)+(x+1)];
        }
    }
    __syncthreads(); // tile + halo fully staged before anyone reads it
    // Algorithm
    /////////////////////////////////////////////////////////////////////
    // sum1: horizontal Sobel response (right column minus left, weights
    // 1/2/1); sum2: vertical response (top row minus bottom row).
    int sum1 = inTile[sharedY-1][sharedX+1] - inTile[sharedY-1][sharedX-1]
             + 2 * (inTile[sharedY ][sharedX+1] - inTile[sharedY ][sharedX-1])
             + inTile[sharedY+1][sharedX+1] - inTile[sharedY+1][sharedX-1];
    int sum2 = inTile[sharedY-1][sharedX-1] + inTile[sharedY-1][sharedX+1]
             + 2 * (inTile[sharedY-1][sharedX ] - inTile[sharedY+1][sharedX ])
             - inTile[sharedY+1][sharedX-1] - inTile[sharedY+1][sharedX+1];
    int magnitude = sqrt( (float) (sum1*sum1+sum2*sum2));
    if(magnitude > threshold)
        output[location] = 255;
    else
        output[location] = 0;
}
// Trivial kernel used only to spin up the device/driver before timing:
// writes 0 to every element of `output`; `input` is unused.
__global__ void warmup(unsigned char *input,
                       unsigned char *output){
    // Read Input Data
    /////////////////////////////////////////////////////////////////////////////
    int x = blockIdx.x*TILE_SIZE+threadIdx.x;
    int y = blockIdx.y*TILE_SIZE+threadIdx.y;
    int location = y*(gridDim.x*TILE_SIZE)+x;
    unsigned char value = 0;
    output[location] = value;
}
// Host wrapper for the Sobel kernel: rounds the image up to whole
// TILE_SIZE tiles, copies `data` to the GPU, launches `kernel` (timed with
// events when CUDA_TIMING is defined) and copies the 0/255 result back into
// `data` in place.
// NOTE(review): both copies use the padded size XSize*YSize, so the
// caller's buffer must already be allocated at the padded size.
void gpu_function (unsigned char *data,
                   unsigned int height,
                   unsigned int width,
                   int threshold ){
    int gridXSize = 1 + (( width - 1) / TILE_SIZE); // ceil(width / TILE_SIZE)
    int gridYSize = 1 + ((height - 1) / TILE_SIZE);
    int XSize = gridXSize*TILE_SIZE;
    int YSize = gridYSize*TILE_SIZE;
    // Both are the same size (CPU/GPU).
    int size = XSize*YSize;
    // Allocate arrays in GPU memory
    checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
    checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
    checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
    // Copy data to GPU
    checkCuda(hipMemcpy(input_gpu,
                        data,
                        size*sizeof(char),
                        hipMemcpyHostToDevice));
    checkCuda(hipDeviceSynchronize());
    // Execute algorithm: one TILE_SIZE x TILE_SIZE block per image tile.
    dim3 dimGrid(gridXSize, gridYSize);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE);
    // Kernel Call
    #if defined(CUDA_TIMING)
        float Ktime;
        TIMER_CREATE(Ktime);
        TIMER_START(Ktime);
    #endif
    hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
                       output_gpu,
                       width,
                       height,
                       threshold);
    checkCuda(hipPeekAtLastError());   // catch launch-configuration errors
    checkCuda(hipDeviceSynchronize()); // surface any execution errors
    #if defined(CUDA_TIMING)
        TIMER_END(Ktime);
        printf("Kernel Execution Time: %f ms\n", Ktime);
    #endif
    // Retrieve results from the GPU
    checkCuda(hipMemcpy(data,
                        output_gpu,
                        size*sizeof(unsigned char),
                        hipMemcpyDeviceToHost));
    // Free resources and end the program
    checkCuda(hipFree(output_gpu));
    checkCuda(hipFree(input_gpu));
}
// Optional warm-up pass (compiled only when WARMUP is defined): mirrors
// gpu_function's allocate/copy/launch/copy-back sequence but runs the
// trivial `warmup` kernel, so the real timed run does not pay driver and
// device initialization costs. Leaves `data` zero-filled on return.
void gpu_warmup (unsigned char *data,
                 unsigned int height,
                 unsigned int width){
    #if defined (WARMUP)
    int gridXSize = 1 + (( width - 1) / TILE_SIZE);
    int gridYSize = 1 + ((height - 1) / TILE_SIZE);
    int XSize = gridXSize*TILE_SIZE;
    int YSize = gridYSize*TILE_SIZE;
    // Both are the same size (CPU/GPU).
    int size = XSize*YSize;
    // Allocate arrays in GPU memory
    checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
    checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
    checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
    // Copy data to GPU
    checkCuda(hipMemcpy(input_gpu,
                        data,
                        size*sizeof(char),
                        hipMemcpyHostToDevice));
    checkCuda(hipDeviceSynchronize());
    // Execute algorithm
    dim3 dimGrid(gridXSize, gridYSize);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE);
    hipLaunchKernelGGL(( warmup), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
                       output_gpu);
    checkCuda(hipDeviceSynchronize());
    // Retrieve results from the GPU
    checkCuda(hipMemcpy(data,
                        output_gpu,
                        size*sizeof(unsigned char),
                        hipMemcpyDeviceToHost));
    // Free resources and end the program
    checkCuda(hipFree(output_gpu));
    checkCuda(hipFree(input_gpu));
    #endif
}
| 0090ae02b327315f8b780dc431d50094e4a6e7dd.cu |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include "config.h"
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
// Pass-through status check: returns its argument unchanged; in DEBUG builds
// a non-success status prints the error string to stderr and exits.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        exit(-1);
    }
#endif
    return result;
}
// Sobel edge detector: each block stages one TILE_SIZE x TILE_SIZE tile of
// the input image plus a 1-pixel halo in shared memory, computes the Sobel
// gradient magnitude at its pixel, and writes 255 (edge) or 0 depending on
// `threshold`. Assumes the buffers are padded to whole tiles by the host
// wrapper, so no per-pixel bounds checks are needed; the row pitch is
// derived as gridDim.x*TILE_SIZE. size_x/size_y are accepted but unused.
// NOTE(review): the halo loads compare tx/ty against BLOCK_TILE_SIZE-1
// while the tile is TILE_SIZE wide — presumably BLOCK_TILE_SIZE ==
// TILE_SIZE in config.h; confirm, otherwise part of the halo is never
// filled.
__global__ void kernel(unsigned char *input,
                       unsigned char *output,
                       unsigned int size_x,
                       unsigned int size_y,
                       int threshold){
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Including border
    __shared__ unsigned char inTile[TILE_SIZE+2][TILE_SIZE+2]; // input
    // Read Input Data into Shared Memory
    /////////////////////////////////////////////////////////////////////////////
    int x = bx*TILE_SIZE+tx;
    int y = by*TILE_SIZE+ty;
    int location = y*(gridDim.x*TILE_SIZE)+x;
    int sharedX = tx + 1; // +1 skips the 1-pixel halo ring
    int sharedY = ty + 1;
    inTile[sharedY][sharedX] = input[location];
    // Read Border Data into Shared Memory (pixels beyond the image edge read as 0)
    /////////////////////////////////////////////////////////////////////////////////////
    int posX;
    int posY;
    // Horizontal Border (top and bottom halo rows)
    if (ty == 0){
        posX = sharedX;
        posY = 0;
        if (by == 0){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[(y-1)*(gridDim.x*TILE_SIZE)+x];
        }
    } else if (ty == BLOCK_TILE_SIZE-1){
        posX = sharedX;
        posY = TILE_SIZE+1;
        if (by == gridDim.y-1){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[(y+1)*(gridDim.x*TILE_SIZE)+x];
        }
    }
    // Vertical Border (left and right halo columns)
    if (tx == 0){
        posX = 0;
        posY = sharedY;
        if (bx == 0){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[y*(gridDim.x*TILE_SIZE)+(x-1)];
        }
    } else if (tx == BLOCK_TILE_SIZE-1){
        posX = TILE_SIZE+1;
        posY = sharedY;
        if (bx == gridDim.x-1){
            inTile[posY][posX] = 0;
        } else {
            inTile[posY][posX] = input[y*(gridDim.x*TILE_SIZE)+(x+1)];
        }
    }
    // Corners for Border (one thread fills each of the four halo corners)
    if (tx == 0 && ty == 0){
        if (bx == 0 || by == 0){
            inTile[ 0][ 0] = 0;
        } else {
            inTile[ 0][ 0] = input [(y-1)*(gridDim.x*TILE_SIZE)+(x-1)];
        }
    } else if (tx == 0 && ty == BLOCK_TILE_SIZE-1){
        if (bx == 0 || by == gridDim.y-1){
            inTile[TILE_SIZE+1][ 0] = 0;
        } else {
            inTile[TILE_SIZE+1][ 0] = input [(y+1)*(gridDim.x*TILE_SIZE)+(x-1)];
        }
    } else if (tx == BLOCK_TILE_SIZE-1 && ty == 0){
        if (bx == gridDim.x-1 || by == 0){
            inTile[ 0][TILE_SIZE+1] = 0;
        } else {
            inTile[ 0][TILE_SIZE+1] = input [(y-1)*(gridDim.x*TILE_SIZE)+(x+1)];
        }
    } else if (tx == BLOCK_TILE_SIZE-1 && ty == BLOCK_TILE_SIZE-1){
        if (bx == gridDim.x-1 || by == gridDim.y-1){
            inTile[TILE_SIZE+1][TILE_SIZE+1] = 0;
        } else {
            inTile[TILE_SIZE+1][TILE_SIZE+1] = input [(y+1)*(gridDim.x*TILE_SIZE)+(x+1)];
        }
    }
    __syncthreads(); // tile + halo fully staged before anyone reads it
    // Algorithm
    /////////////////////////////////////////////////////////////////////
    // sum1: horizontal Sobel response (right column minus left, weights
    // 1/2/1); sum2: vertical response (top row minus bottom row).
    int sum1 = inTile[sharedY-1][sharedX+1] - inTile[sharedY-1][sharedX-1]
             + 2 * (inTile[sharedY ][sharedX+1] - inTile[sharedY ][sharedX-1])
             + inTile[sharedY+1][sharedX+1] - inTile[sharedY+1][sharedX-1];
    int sum2 = inTile[sharedY-1][sharedX-1] + inTile[sharedY-1][sharedX+1]
             + 2 * (inTile[sharedY-1][sharedX ] - inTile[sharedY+1][sharedX ])
             - inTile[sharedY+1][sharedX-1] - inTile[sharedY+1][sharedX+1];
    int magnitude = sqrt( (float) (sum1*sum1+sum2*sum2));
    if(magnitude > threshold)
        output[location] = 255;
    else
        output[location] = 0;
}
// Trivial kernel used only to spin up the device/driver before timing:
// writes 0 to every element of `output`; `input` is unused.
__global__ void warmup(unsigned char *input,
                       unsigned char *output){
    // Read Input Data
    /////////////////////////////////////////////////////////////////////////////
    int x = blockIdx.x*TILE_SIZE+threadIdx.x;
    int y = blockIdx.y*TILE_SIZE+threadIdx.y;
    int location = y*(gridDim.x*TILE_SIZE)+x;
    unsigned char value = 0;
    output[location] = value;
}
// Host wrapper for the Sobel kernel: rounds the image up to whole
// TILE_SIZE tiles, copies `data` to the GPU, launches `kernel` (timed with
// events when CUDA_TIMING is defined) and copies the 0/255 result back into
// `data` in place.
// NOTE(review): both copies use the padded size XSize*YSize, so the
// caller's buffer must already be allocated at the padded size.
void gpu_function (unsigned char *data,
                   unsigned int height,
                   unsigned int width,
                   int threshold ){
    int gridXSize = 1 + (( width - 1) / TILE_SIZE); // ceil(width / TILE_SIZE)
    int gridYSize = 1 + ((height - 1) / TILE_SIZE);
    int XSize = gridXSize*TILE_SIZE;
    int YSize = gridYSize*TILE_SIZE;
    // Both are the same size (CPU/GPU).
    int size = XSize*YSize;
    // Allocate arrays in GPU memory
    checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
    checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
    checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
    // Copy data to GPU
    checkCuda(cudaMemcpy(input_gpu,
                         data,
                         size*sizeof(char),
                         cudaMemcpyHostToDevice));
    checkCuda(cudaDeviceSynchronize());
    // Execute algorithm: one TILE_SIZE x TILE_SIZE block per image tile.
    dim3 dimGrid(gridXSize, gridYSize);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE);
    // Kernel Call
    #if defined(CUDA_TIMING)
        float Ktime;
        TIMER_CREATE(Ktime);
        TIMER_START(Ktime);
    #endif
    kernel<<<dimGrid, dimBlock>>>(input_gpu,
                                  output_gpu,
                                  width,
                                  height,
                                  threshold);
    checkCuda(cudaPeekAtLastError());   // catch launch-configuration errors
    checkCuda(cudaDeviceSynchronize()); // surface any execution errors
    #if defined(CUDA_TIMING)
        TIMER_END(Ktime);
        printf("Kernel Execution Time: %f ms\n", Ktime);
    #endif
    // Retrieve results from the GPU
    checkCuda(cudaMemcpy(data,
                         output_gpu,
                         size*sizeof(unsigned char),
                         cudaMemcpyDeviceToHost));
    // Free resources and end the program
    checkCuda(cudaFree(output_gpu));
    checkCuda(cudaFree(input_gpu));
}
// Optional warm-up pass (compiled only when WARMUP is defined): mirrors
// gpu_function's allocate/copy/launch/copy-back sequence but runs the
// trivial `warmup` kernel, so the real timed run does not pay driver and
// device initialization costs. Leaves `data` zero-filled on return.
void gpu_warmup (unsigned char *data,
                 unsigned int height,
                 unsigned int width){
    #if defined (WARMUP)
    int gridXSize = 1 + (( width - 1) / TILE_SIZE);
    int gridYSize = 1 + ((height - 1) / TILE_SIZE);
    int XSize = gridXSize*TILE_SIZE;
    int YSize = gridYSize*TILE_SIZE;
    // Both are the same size (CPU/GPU).
    int size = XSize*YSize;
    // Allocate arrays in GPU memory
    checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
    checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
    checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
    // Copy data to GPU
    checkCuda(cudaMemcpy(input_gpu,
                         data,
                         size*sizeof(char),
                         cudaMemcpyHostToDevice));
    checkCuda(cudaDeviceSynchronize());
    // Execute algorithm
    dim3 dimGrid(gridXSize, gridYSize);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE);
    warmup<<<dimGrid, dimBlock>>>(input_gpu,
                                  output_gpu);
    checkCuda(cudaDeviceSynchronize());
    // Retrieve results from the GPU
    checkCuda(cudaMemcpy(data,
                         output_gpu,
                         size*sizeof(unsigned char),
                         cudaMemcpyDeviceToHost));
    // Free resources and end the program
    checkCuda(cudaFree(output_gpu));
    checkCuda(cudaFree(input_gpu));
    #endif
}
|
5eb111dae7cace1742a14a377434937abd0b4911.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, dakefeng@gmail.com
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
// Regularization weighting step over interior grid points: for each point
// (m, n) in [1, num_grid-2]^2 of each slice k, adds the eight neighbour
// weights dev_wg8 (scaled by 2*beta) into dev_F[ind0] and subtracts the
// corresponding weighted (center + neighbour) reconstruction terms from
// dev_G[ind0].
// Launch: 2D thread blocks tile the grid plane; blockIdx.z selects the slice.
__global__ void _weightInnerkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg8, float *dev_recon)
{
    uint m = blockIdx.x*blockDim.x + threadIdx.x+1; // +1 skips the boundary column
    uint n = blockIdx.y*blockDim.y + threadIdx.y+1; // +1 skips the boundary row
    uint k = blockIdx.z;                            // slice index
    int q;
    int ind0, indg[8];
    // Out-of-range slices and boundary/overhanging grid points do nothing.
    if ((k>=num_slices)||(n<1)||(n>=(num_grid-1))||(m<1)||(m>=(num_grid-1)))
        return;
    ind0 = m + n*num_grid + k*num_grid*num_grid;
    // Linear offsets of the 8 neighbours (axis-aligned and diagonal).
    indg[0] = ind0+1;
    indg[1] = ind0-1;
    indg[2] = ind0+num_grid;
    indg[3] = ind0-num_grid;
    indg[4] = ind0+num_grid+1;
    indg[5] = ind0+num_grid-1;
    indg[6] = ind0-num_grid+1;
    indg[7] = ind0-num_grid-1;
    for (q = 0; q < 8; q++) {
        dev_F[ind0] += 2*beta*dev_wg8[q];
        dev_G[ind0] -= 2*beta*dev_wg8[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
    }
} | 5eb111dae7cace1742a14a377434937abd0b4911.cu | #include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, dakefeng@gmail.com
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _weightInnerkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg8, float *dev_recon)
{
uint m = blockIdx.x*blockDim.x + threadIdx.x+1;
uint n = blockIdx.y*blockDim.y + threadIdx.y+1;
uint k = blockIdx.z;
int q;
int ind0, indg[8];
if ((k>=num_slices)||(n<1)||(n>=(num_grid-1))||(m<1)||(m>=(num_grid-1)))
return;
ind0 = m + n*num_grid + k*num_grid*num_grid;
indg[0] = ind0+1;
indg[1] = ind0-1;
indg[2] = ind0+num_grid;
indg[3] = ind0-num_grid;
indg[4] = ind0+num_grid+1;
indg[5] = ind0+num_grid-1;
indg[6] = ind0-num_grid+1;
indg[7] = ind0-num_grid-1;
for (q = 0; q < 8; q++) {
dev_F[ind0] += 2*beta*dev_wg8[q];
dev_G[ind0] -= 2*beta*dev_wg8[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
}
} |
cb1e33efa05912b65224e90c41bf4d652accf2e1.hip | // !!! This is a file automatically generated by hipify!!!
// A program to generate a 1-dimensional NumPy array that stores the users desired number of Brownian Paths, all generated in parallel on crisprs 3 GPUs.
// Also generates an array indicating the time at which each path crosses the upper threhsold supplied by the user. See documentation.
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include "../book.h"
#include <vector>
int *crossTimes = nullptr;
int *failCross = nullptr;
//Function to generate brownian paths, which are stored in results. Executes on the GPU, hence the __global__ identifier
// Generates numSims Brownian paths of N steps each into `results` (path s
// occupies results[s*N .. s*N+N-1], starting at 0) and records in
// crossTimes[s] the first step index at which path s reaches
// upperThreshold (0 if it never crosses). Each increment is a standard
// normal sample scaled by sqrt(T/N); deviceID is folded into the RNG seed
// so different GPUs produce different streams. One thread per simulation.
__global__ void randomWalk(double *results, int *crossTimes, int T, int N, int numSims, double upperThreshold, double deviceID) {
    // a variable to keep track of this simulation's position in the crossTimes array
    int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if (crossTimeIndex < numSims) {
        // create random number generator (clock64 makes each launch's seed unique)
        hiprandState_t state;
        hiprand_init (blockIdx.x * (1000 * deviceID) + threadIdx.x + clock64(), 0, 0, &state);
        double random;
        // starting position of this siulation in results array
        int start = (threadIdx.x + blockIdx.x * blockDim.x) * N;
        // set default value of cross time for this simulation to 0, since the simulation hasn't crossed the threshold yet
        crossTimes[crossTimeIndex] = 0;
        // starting point of path is 0
        results[start] = 0.0;
        // boolean to keep track of whether this path has crossed
        bool crossed = false;
        for (int j = start + 1; j < start + N; j++) {
            // generate random number
            random = hiprand_normal_double(&state);
            //calculate next step of path
            results[j] = results[j-1] + random * sqrt((double) T / N);
            // record only the FIRST step at which the path reaches the upper
            // threshold (no lower threshold is implemented here)
            if (!crossed && results[j] >= upperThreshold) {
                crossTimes[crossTimeIndex] = j - start;
                crossed = true;
            }
        }
    }
}
// data structure to hold information for each GPU
// Per-GPU work descriptor passed to routine(); resultArray/crossArray are
// filled in by routine() with host buffers it malloc's (caller frees).
struct DataStruct {
    int deviceID; // id of gpu to run on
    int sims; // number of simulations assigned to this gpu
    double *resultArray; // output: brownian paths calculated on this gpu (sims * N doubles)
    int *crossArray; // output: first upper-threshold crossing step per simulation
    int N; // number of time steps per path
    double T; // time horizon for the brownian path equation
    double upperThreshold; // threshold whose first crossing time is recorded
};
// function to execute on each individual GPU
// Worker entry point, run once per GPU (also callable directly on the main
// thread). voidData points at a DataStruct; on return, data->resultArray and
// data->crossArray hold malloc'ed host copies of the kernel output
// (ownership passes to the caller).
void* routine(void *voidData) {
    DataStruct *data = (DataStruct*)voidData;
    hipSetDevice(data->deviceID);
    int sims = data->sims;
    // Host output buffers and device scratch buffers for the kernel.
    double *dev_results;
    double *partialResults = (double*)malloc(sims * data->N * sizeof(double));
    int *dev_crossTimes;
    int *partialCrossTimes = (int*)malloc(sims * sizeof(int));
    hipMalloc(&dev_results, data->N * sims * sizeof(double));
    hipMalloc(&dev_crossTimes, sims * sizeof(int));
    // Launch configuration: one thread per simulation.
    // BUG FIX: this used to be numBlocks = (511 + sims) / sims, which
    // collapses to a single block whenever sims > 511, so only the first
    // 512 simulations were ever computed. Use the standard ceiling
    // division by the block size instead.
    int numThreads = 512;
    int numBlocks = (sims + numThreads - 1) / numThreads;
    // Launch the kernel. NOTE(review): data->T is a double but the kernel
    // parameter T is an int — the value is silently truncated; confirm this
    // narrowing is intended.
    hipLaunchKernelGGL(( randomWalk), dim3(numBlocks), dim3(numThreads), 0, 0, dev_results, dev_crossTimes, data->T, data->N, sims, data->upperThreshold, data->deviceID);
    // Copy device results back to the host buffers (hipMemcpy synchronizes
    // with the kernel on the default stream).
    hipMemcpy(partialResults, dev_results , data->N * sims * sizeof(double), hipMemcpyDeviceToHost);
    hipMemcpy(partialCrossTimes, dev_crossTimes , sims * sizeof(int), hipMemcpyDeviceToHost);
    data->resultArray = partialResults;
    data->crossArray = partialCrossTimes;
    // Free GPU scratch memory; host buffers are returned via the struct.
    hipFree(dev_results);
    hipFree(dev_crossTimes);
    return 0;
}
// host function to generate the results and crossTimes arrays, and then return the results array
// defined in 3gpubm.h in order to import into cython code (see GenerateNumPy3.pyx)
// Host driver: splits numSims Brownian-path simulations across three worker
// threads (one DataStruct each), concatenates their outputs into `results`
// and the global `crossTimes`, times the whole run with HIP events, and
// prints both arrays.
int main() {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);
    // Simulation parameters: horizon T, N steps per path, numSims paths.
    double T = 1;
    int N = 6;
    int numSims = 20;
    double upperThreshold = 1;
    // Fill one work descriptor per worker. NOTE(review): the comment below
    // mentions 3 GPUs, yet all three deviceIDs are 0, so everything runs on
    // device 0 — confirm whether 0/1/2 was intended.
    // fill a data structure of each of crispr's 3 gpu's
    DataStruct data[3];
    data[0].deviceID = 0;
    data[0].sims = numSims / 3;
    data[0].N = N;
    data[0].T = T;
    data[0].upperThreshold = upperThreshold;
    data[1].deviceID = 0;
    data[1].sims = numSims / 3;
    data[1].N = N;
    data[1].T = T;
    data[1].upperThreshold = upperThreshold;
    data[2].deviceID = 0;
    // The last worker picks up the remainder so all numSims paths are covered.
    data[2].sims = numSims / 3 + numSims % 3;
    data[2].N = N;
    data[2].T = T;
    data[2].upperThreshold = upperThreshold;
    // Two helper threads plus the current thread, one DataStruct each.
    CUTThread thread = start_thread(routine, &(data[0]));
    CUTThread thread2 = start_thread(routine, &(data[1]));
    routine(&(data[2]));
    end_thread(thread);
    end_thread(thread2);
    double *results = new double[N * numSims]; // the main array to store the path for each simulation, with an index for each point along the path
    crossTimes = new int[numSims]; // the array to store the cross time for each simulation
    // Concatenate the per-worker path arrays in worker order.
    double *arr1 = data[0].resultArray;
    int size1 = data[0].sims * N;
    double *arr2 = data[1].resultArray;
    int size2 = data[1].sims * N;
    double *arr3 = data[2].resultArray;
    int size3 = data[2].sims * N;
    std::copy(arr1, arr1 + size1, results);
    std::copy(arr2, arr2 + size2, results + size1);
    std::copy(arr3, arr3 + size3, results + size1 + size2);
    // Concatenate the per-worker cross-time arrays the same way.
    int *carr1 = data[0].crossArray;
    size1 = data[0].sims;
    int *carr2 = data[1].crossArray;
    size2 = data[1].sims;
    int *carr3 = data[2].crossArray;
    size3 = data[2].sims;
    std::copy(carr1, carr1 + size1, crossTimes);
    std::copy(carr2, carr2 + size2, crossTimes + size1);
    std::copy(carr3, carr3 + size3, crossTimes + size1 + size2);
    // Stop the timer (elapsedTime is computed but only printed when the
    // line below is uncommented).
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    //printf("Time to generate: %3.1f ms/n", elapsedTime);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // Dump all paths, then all cross times.
    for (int i=0; i < (N * numSims); i++) {
        printf("%f ", results[i]);
    }
    printf("\n");
    printf("\n");
    for (int i=0; i < (numSims); i++) {
        printf("%d ", crossTimes[i]);
    }
    printf("\n");
    return 0;
}
| cb1e33efa05912b65224e90c41bf4d652accf2e1.cu | // A program to generate a 1-dimensional NumPy array that stores the user’s desired number of Brownian Paths, all generated in parallel on crispr’s 3 GPU’s.
// Also generates an array indicating the time at which each path crosses the upper threhsold supplied by the user. See documentation.
#include <cuda.h>
#include <curand_kernel.h>
#include <stdio.h>
#include "../book.h"
#include <vector>
// Host-side global output buffer; allocated and filled in main().
int *crossTimes = nullptr;
// NOTE(review): failCross is never used anywhere in this file — confirm it is needed.
int *failCross = nullptr;
//Function to generate brownian paths, which are stored in results. Executes on the GPU, hence the __global__ identifier
// Kernel: each thread generates one Brownian path of N steps and records the
// first step at which the path reaches upperThreshold (0 if it never does).
// Path i occupies results[i*N .. i*N+N-1]; deviceID is mixed into the RNG
// seed so different devices draw different random sequences.
__global__ void randomWalk(double *results, int *crossTimes, int T, int N, int numSims, double upperThreshold, double deviceID) {
    const int sim = threadIdx.x + blockIdx.x * blockDim.x;
    if (sim >= numSims)
        return;
    // Per-thread RNG seeded from the device id, thread position and clock.
    curandState_t rng;
    curand_init (blockIdx.x * (1000 * deviceID) + threadIdx.x + clock64(), 0, 0, &rng);
    // First slot of this simulation's path inside the results array.
    const int base = sim * N;
    crossTimes[sim] = 0;   // default: threshold never crossed
    results[base] = 0.0;   // every path starts at the origin
    bool hit = false;
    for (int step = base + 1; step < base + N; step++) {
        const double z = curand_normal_double(&rng);
        // Euler step of standard Brownian motion: dX = z * sqrt(T/N).
        results[step] = results[step - 1] + z * sqrt((double) T / N);
        // Only the first crossing of the upper threshold is recorded.
        if (!hit && results[step] >= upperThreshold) {
            crossTimes[sim] = step - base;
            hit = true;
        }
    }
}
// data structure to hold information for each GPU
// Per-GPU work description: filled by main(), consumed by routine(),
// which writes its output pointers back into resultArray / crossArray.
struct DataStruct {
    int deviceID; // id of gpu
    int sims; // number of simulations to be executed on this gpu
    double *resultArray; // array to store brownian paths calculated on this gpu
    int *crossArray; // array to store cross times calculated on this gpu
    int N; // number of steps per path (NOT the number of simulations)
    double T; // time horizon of the brownian path equation
    double upperThreshold; // crossing threshold passed to the kernel
};
// function to execute on each individual GPU
// Worker entry point, run once per GPU (also callable directly on the main
// thread). voidData points at a DataStruct; on return, data->resultArray and
// data->crossArray hold malloc'ed host copies of the kernel output
// (ownership passes to the caller).
void* routine(void *voidData) {
    DataStruct *data = (DataStruct*)voidData;
    cudaSetDevice(data->deviceID);
    int sims = data->sims;
    // Host output buffers and device scratch buffers for the kernel.
    double *dev_results;
    double *partialResults = (double*)malloc(sims * data->N * sizeof(double));
    int *dev_crossTimes;
    int *partialCrossTimes = (int*)malloc(sims * sizeof(int));
    cudaMalloc(&dev_results, data->N * sims * sizeof(double));
    cudaMalloc(&dev_crossTimes, sims * sizeof(int));
    // Launch configuration: one thread per simulation.
    // BUG FIX: this used to be numBlocks = (511 + sims) / sims, which
    // collapses to a single block whenever sims > 511, so only the first
    // 512 simulations were ever computed. Use the standard ceiling
    // division by the block size instead.
    int numThreads = 512;
    int numBlocks = (sims + numThreads - 1) / numThreads;
    // Launch the kernel. NOTE(review): data->T is a double but the kernel
    // parameter T is an int — the value is silently truncated; confirm this
    // narrowing is intended.
    randomWalk<<<numBlocks, numThreads>>>(dev_results, dev_crossTimes, data->T, data->N, sims, data->upperThreshold, data->deviceID);
    // Copy device results back to the host buffers (cudaMemcpy synchronizes
    // with the kernel on the default stream).
    cudaMemcpy(partialResults, dev_results , data->N * sims * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(partialCrossTimes, dev_crossTimes , sims * sizeof(int), cudaMemcpyDeviceToHost);
    data->resultArray = partialResults;
    data->crossArray = partialCrossTimes;
    // Free GPU scratch memory; host buffers are returned via the struct.
    cudaFree(dev_results);
    cudaFree(dev_crossTimes);
    return 0;
}
// host function to generate the results and crossTimes arrays, and then return the results array
// defined in 3gpubm.h in order to import into cython code (see GenerateNumPy3.pyx)
// Host driver: splits numSims Brownian-path simulations across three worker
// threads (one DataStruct each), concatenates their outputs into `results`
// and the global `crossTimes`, times the whole run with CUDA events, and
// prints both arrays.
int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    // Simulation parameters: horizon T, N steps per path, numSims paths.
    double T = 1;
    int N = 6;
    int numSims = 20;
    double upperThreshold = 1;
    // Fill one work descriptor per worker. NOTE(review): the comment below
    // mentions 3 GPUs, yet all three deviceIDs are 0, so everything runs on
    // device 0 — confirm whether 0/1/2 was intended.
    // fill a data structure of each of crispr's 3 gpu's
    DataStruct data[3];
    data[0].deviceID = 0;
    data[0].sims = numSims / 3;
    data[0].N = N;
    data[0].T = T;
    data[0].upperThreshold = upperThreshold;
    data[1].deviceID = 0;
    data[1].sims = numSims / 3;
    data[1].N = N;
    data[1].T = T;
    data[1].upperThreshold = upperThreshold;
    data[2].deviceID = 0;
    // The last worker picks up the remainder so all numSims paths are covered.
    data[2].sims = numSims / 3 + numSims % 3;
    data[2].N = N;
    data[2].T = T;
    data[2].upperThreshold = upperThreshold;
    // Two helper threads plus the current thread, one DataStruct each.
    CUTThread thread = start_thread(routine, &(data[0]));
    CUTThread thread2 = start_thread(routine, &(data[1]));
    routine(&(data[2]));
    end_thread(thread);
    end_thread(thread2);
    double *results = new double[N * numSims]; // the main array to store the path for each simulation, with an index for each point along the path
    crossTimes = new int[numSims]; // the array to store the cross time for each simulation
    // Concatenate the per-worker path arrays in worker order.
    double *arr1 = data[0].resultArray;
    int size1 = data[0].sims * N;
    double *arr2 = data[1].resultArray;
    int size2 = data[1].sims * N;
    double *arr3 = data[2].resultArray;
    int size3 = data[2].sims * N;
    std::copy(arr1, arr1 + size1, results);
    std::copy(arr2, arr2 + size2, results + size1);
    std::copy(arr3, arr3 + size3, results + size1 + size2);
    // Concatenate the per-worker cross-time arrays the same way.
    int *carr1 = data[0].crossArray;
    size1 = data[0].sims;
    int *carr2 = data[1].crossArray;
    size2 = data[1].sims;
    int *carr3 = data[2].crossArray;
    size3 = data[2].sims;
    std::copy(carr1, carr1 + size1, crossTimes);
    std::copy(carr2, carr2 + size2, crossTimes + size1);
    std::copy(carr3, carr3 + size3, crossTimes + size1 + size2);
    // Stop the timer (elapsedTime is computed but only printed when the
    // line below is uncommented).
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    //printf("Time to generate: %3.1f ms/n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Dump all paths, then all cross times.
    for (int i=0; i < (N * numSims); i++) {
        printf("%f ", results[i]);
    }
    printf("\n");
    printf("\n");
    for (int i=0; i < (numSims); i++) {
        printf("%d ", crossTimes[i]);
    }
    printf("\n");
    return 0;
}
|
70643c153ff74e5648d58bc61990be266dec92f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<bits/stdc++.h>
using namespace std;
#define MAX_VAL ((int)1e8)
#define cudaCatchError(error) { gpuAssert((error), __FILE__, __LINE__); }
// Catch Cuda errors
// Prints a readable description of a HIP error together with the file and
// line of the failing call; optionally terminates the process.
// BUG FIX: the original format string had three conversions
// ("%i ... %s in CUDA %s") but only two arguments were passed, which is
// undefined behavior — the trailing %s has been removed.
// NOTE(review): `abort` defaults to false, so by default errors are only
// logged, never fatal — confirm that is intended.
inline void gpuAssert(hipError_t error, const char *file, int line, bool abort = false)
{
    if (error != hipSuccess)
    {
        printf("\n====== Cuda Error Code %i ======\n%s\n", error, hipGetErrorString(error));
        printf("\nIn file :%s\nOn line: %d", file, line);
        if(abort)
            exit(-1);
    }
}
// Single-block BFS over a CSR graph (d_r = row offsets, d_c = column
// indices), starting from node 0. Writes each node's depth into d_depth and
// the total number of levels into max_depth[0].
// Must be launched with exactly ONE block (the host uses <<<1, 1024>>>):
// the frontier queues Q1/Q2 and the __syncthreads barriers only coordinate
// threads within a single block.
__global__ void compute(int *d_r, int *d_c, int *d_depth, int *max_depth, int *Q1, int *Q2, int nodes){
    int idx = threadIdx.x;
    // len1/len2: sizes of the current/next frontier; curr_depth: BFS level.
    __shared__ int len1, len2, curr_depth;
    int i;
    // Mark every node unvisited (strided over the 1024 threads).
    for(i=idx; i<nodes; i+=1024){
        d_depth[i] = MAX_VAL;
    }
    // Thread 0 seeds the frontier with the source node 0.
    if(idx == 0){
        d_depth[0] = 0;
        curr_depth = 0;
        len1 = 1;
        len2 = 0;
        Q1[0] = 0;
    }
    __syncthreads();
    // Process one frontier (Q1) per iteration until it is empty.
    while(len1){
        for(i=idx; i<len1; i+=1024){
            // Scan the adjacency list of frontier node Q1[i].
            for(int j=d_r[Q1[i]]; j<d_r[Q1[i]+1]; j++){
                int v = d_c[j];
                // atomicCAS claims v exactly once: only the winning thread
                // sets its depth and appends it to the next frontier Q2.
                if(atomicCAS(&d_depth[v], MAX_VAL, d_depth[Q1[i]]+1) == MAX_VAL){
                    int t = atomicAdd(&len2,1);
                    Q2[t] = v;
                }
            }
        }
        __syncthreads();
        // Thread 0 serially promotes the next frontier to Q1.
        if(idx==0){
            for(i=0; i<len2; i++){
                Q1[i] = Q2[i];
            }
            len1 = len2;
            len2 = 0;
            curr_depth++;
        }
        __syncthreads();
    }
    // All threads store the same shared value, so this redundant write is benign.
    max_depth[0] = curr_depth;
}
// Host driver: reads a CSR graph (node/edge counts, row offsets, column
// indices) from argv[1], runs the single-block BFS kernel, prints the depth
// and the kernel time, then validates d_depth against the expected depths in
// argv[2].
// (The function's closing brace is on the following line and is unchanged.)
int main(int argc, char *argv[]){
    // BUG FIX: argv[2] (the expected-output file) is read below, so two
    // arguments are required — the old check only demanded one.
    if(argc<3){
        cout << "Usage: " << argv[0] << " <graph_file_name> <expected_output_file>\n";
        return 0;
    }
    ifstream input;
    input.open(argv[1]);
    int nodes, edges, i;
    input >> nodes;
    input >> edges;
    // Host-side CSR arrays; the graph is undirected, hence 2*edges columns.
    int *h_r = (int*)malloc((nodes+1)*sizeof(int));
    int *h_c = (int*)malloc(edges*2*sizeof(int));
    // Read row offsets then column indices.
    for(i=0; i<nodes+1; i++){
        input >> h_r[i];
    }
    for(i=0; i<edges*2; i++){
        input >> h_c[i];
    }
    // Device buffers: two frontier queues, CSR arrays, depths, level count.
    int *Q1, *Q2, *d_r, *d_c, *d_depth, *max_depth;
    hipMalloc((void**)&Q1, nodes*sizeof(int));
    hipMalloc((void**)&Q2, nodes*sizeof(int));
    hipMalloc((void**)&d_r, (nodes+1)*sizeof(int));
    hipMalloc((void**)&d_c, edges*2*sizeof(int));
    hipMalloc((void**)&d_depth, nodes*sizeof(int));
    hipMalloc((void**)&max_depth, sizeof(int));
    // Upload the graph.
    hipMemcpy(d_r, h_r, (nodes+1)*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_c, h_c, edges*2*sizeof(int), hipMemcpyHostToDevice);
    // Time the kernel with HIP events.
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    // The kernel is written for exactly one block of 1024 threads (it relies
    // on block-wide __syncthreads for the whole traversal).
    printf("Starting Computation\n");
    hipLaunchKernelGGL(( compute) , dim3(1), dim3(1024), 0, 0, d_r, d_c, d_depth, max_depth, Q1, Q2, nodes);
    hipDeviceSynchronize();
    printf("Finished Computation\n");
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    cout<<"Compute time in GPU: "<<milliseconds<<"ms"<<endl;
    // max_depth holds the number of BFS levels; depth = levels - 1.
    int *result = (int *)malloc(sizeof(int));
    cudaCatchError(hipMemcpy(result, max_depth, sizeof(int), hipMemcpyDeviceToHost));
    printf("Depth : %d\n", result[0]-1);
    // Validate the per-node depths against the reference file (argv[2]).
    int *h_depth = (int*) malloc(nodes*sizeof(int));
    hipMemcpy(h_depth, d_depth, nodes*sizeof(int), hipMemcpyDeviceToHost);
    int *h_check_depth = (int*)malloc(nodes*sizeof(int));
    freopen(argv[2], "r", stdin);
    printf("malloc done\n");
    for(int i = 0; i < nodes; i++) {
        cin>>h_check_depth[i];
    }
    printf("Finished reading output file\n");
    bool flag = true;
    int count = 0;
    printf("Starting checking\n");
    for(int i = 0; i < nodes; i++) {
        if(h_depth[i] != h_check_depth[i]) {
            printf("Found %d, Expected %d\n",h_depth[i], h_check_depth[i]);
            flag = false;
            count++;
        }
    }
    printf("Finished checking\n");
    if(flag) {
        cout<<"Solution is correct!\n";
    }
    else {
        cout<<"Solution is incorrect!"<<endl;
        cout<<count<<" testcases failed.\n";
    }
    return 0;
} | 70643c153ff74e5648d58bc61990be266dec92f5.cu | #include<bits/stdc++.h>
using namespace std;
#define MAX_VAL ((int)1e8)
#define cudaCatchError(error) { gpuAssert((error), __FILE__, __LINE__); }
// Catch Cuda errors
// Prints a readable description of a CUDA error together with the file and
// line of the failing call; optionally terminates the process.
// BUG FIX: the original format string had three conversions
// ("%i ... %s in CUDA %s") but only two arguments were passed, which is
// undefined behavior — the trailing %s has been removed.
// NOTE(review): `abort` defaults to false, so by default errors are only
// logged, never fatal — confirm that is intended.
inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false)
{
    if (error != cudaSuccess)
    {
        printf("\n====== Cuda Error Code %i ======\n%s\n", error, cudaGetErrorString(error));
        printf("\nIn file :%s\nOn line: %d", file, line);
        if(abort)
            exit(-1);
    }
}
// Single-block BFS over a CSR graph (d_r = row offsets, d_c = column
// indices), starting from node 0. Writes each node's depth into d_depth and
// the total number of levels into max_depth[0].
// Must be launched with exactly ONE block (the host uses <<<1, 1024>>>):
// the frontier queues Q1/Q2 and the __syncthreads barriers only coordinate
// threads within a single block.
__global__ void compute(int *d_r, int *d_c, int *d_depth, int *max_depth, int *Q1, int *Q2, int nodes){
    int idx = threadIdx.x;
    // len1/len2: sizes of the current/next frontier; curr_depth: BFS level.
    __shared__ int len1, len2, curr_depth;
    int i;
    // Mark every node unvisited (strided over the 1024 threads).
    for(i=idx; i<nodes; i+=1024){
        d_depth[i] = MAX_VAL;
    }
    // Thread 0 seeds the frontier with the source node 0.
    if(idx == 0){
        d_depth[0] = 0;
        curr_depth = 0;
        len1 = 1;
        len2 = 0;
        Q1[0] = 0;
    }
    __syncthreads();
    // Process one frontier (Q1) per iteration until it is empty.
    while(len1){
        for(i=idx; i<len1; i+=1024){
            // Scan the adjacency list of frontier node Q1[i].
            for(int j=d_r[Q1[i]]; j<d_r[Q1[i]+1]; j++){
                int v = d_c[j];
                // atomicCAS claims v exactly once: only the winning thread
                // sets its depth and appends it to the next frontier Q2.
                if(atomicCAS(&d_depth[v], MAX_VAL, d_depth[Q1[i]]+1) == MAX_VAL){
                    int t = atomicAdd(&len2,1);
                    Q2[t] = v;
                }
            }
        }
        __syncthreads();
        // Thread 0 serially promotes the next frontier to Q1.
        if(idx==0){
            for(i=0; i<len2; i++){
                Q1[i] = Q2[i];
            }
            len1 = len2;
            len2 = 0;
            curr_depth++;
        }
        __syncthreads();
    }
    // All threads store the same shared value, so this redundant write is benign.
    max_depth[0] = curr_depth;
}
// Host driver: reads a CSR graph (node/edge counts, row offsets, column
// indices) from argv[1], runs the single-block BFS kernel, prints the depth
// and the kernel time, then validates d_depth against the expected depths in
// argv[2].
int main(int argc, char *argv[]){
    // BUG FIX: argv[2] (the expected-output file) is read below, so two
    // arguments are required — the old check only demanded one.
    if(argc<3){
        cout << "Usage: " << argv[0] << " <graph_file_name> <expected_output_file>\n";
        return 0;
    }
    ifstream input;
    input.open(argv[1]);
    int nodes, edges, i;
    input >> nodes;
    input >> edges;
    // Host-side CSR arrays; the graph is undirected, hence 2*edges columns.
    int *h_r = (int*)malloc((nodes+1)*sizeof(int));
    int *h_c = (int*)malloc(edges*2*sizeof(int));
    // Read row offsets then column indices.
    for(i=0; i<nodes+1; i++){
        input >> h_r[i];
    }
    for(i=0; i<edges*2; i++){
        input >> h_c[i];
    }
    // Device buffers: two frontier queues, CSR arrays, depths, level count.
    int *Q1, *Q2, *d_r, *d_c, *d_depth, *max_depth;
    cudaMalloc((void**)&Q1, nodes*sizeof(int));
    cudaMalloc((void**)&Q2, nodes*sizeof(int));
    cudaMalloc((void**)&d_r, (nodes+1)*sizeof(int));
    cudaMalloc((void**)&d_c, edges*2*sizeof(int));
    cudaMalloc((void**)&d_depth, nodes*sizeof(int));
    cudaMalloc((void**)&max_depth, sizeof(int));
    // Upload the graph.
    cudaMemcpy(d_r, h_r, (nodes+1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, edges*2*sizeof(int), cudaMemcpyHostToDevice);
    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // The kernel is written for exactly one block of 1024 threads (it relies
    // on block-wide __syncthreads for the whole traversal).
    printf("Starting Computation\n");
    compute <<<1, 1024>>> (d_r, d_c, d_depth, max_depth, Q1, Q2, nodes);
    // Replaced deprecated cudaThreadSynchronize() with its direct
    // replacement cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    printf("Finished Computation\n");
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout<<"Compute time in GPU: "<<milliseconds<<"ms"<<endl;
    // max_depth holds the number of BFS levels; depth = levels - 1.
    int *result = (int *)malloc(sizeof(int));
    cudaCatchError(cudaMemcpy(result, max_depth, sizeof(int), cudaMemcpyDeviceToHost));
    printf("Depth : %d\n", result[0]-1);
    // Validate the per-node depths against the reference file (argv[2]).
    int *h_depth = (int*) malloc(nodes*sizeof(int));
    cudaMemcpy(h_depth, d_depth, nodes*sizeof(int), cudaMemcpyDeviceToHost);
    int *h_check_depth = (int*)malloc(nodes*sizeof(int));
    freopen(argv[2], "r", stdin);
    printf("malloc done\n");
    for(int i = 0; i < nodes; i++) {
        cin>>h_check_depth[i];
    }
    printf("Finished reading output file\n");
    bool flag = true;
    int count = 0;
    printf("Starting checking\n");
    for(int i = 0; i < nodes; i++) {
        if(h_depth[i] != h_check_depth[i]) {
            printf("Found %d, Expected %d\n",h_depth[i], h_check_depth[i]);
            flag = false;
            count++;
        }
    }
    printf("Finished checking\n");
    if(flag) {
        cout<<"Solution is correct!\n";
    }
    else {
        cout<<"Solution is incorrect!"<<endl;
        cout<<count<<" testcases failed.\n";
    }
    return 0;
}
5c04721d1a0d1a2d8df491971338d7c9a630dab6.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_simulation.h"
#include "spring.h"
#include "load_obj.h"
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <iostream>
using namespace std;
extern GLenum GL_MODE;
__global__ void get_face_normal(glm::vec4* g_pos_in, unsigned int* cloth_index, const unsigned int cloth_index_size, glm::vec3* cloth_face); //update cloth face normal
__global__ void verlet(glm::vec4* pos_vbo, glm::vec4 * g_pos_in, glm::vec4 * g_pos_old_in, glm::vec4 * g_pos_out, glm::vec4 * g_pos_old_out,glm::vec4* const_pos,
unsigned int* neigh1, unsigned int* neigh2,
glm::vec3* p_normal, unsigned int* vertex_adjface, glm::vec3* face_normal,
const unsigned int NUM_VERTICES,
BRTreeNode* leaf_nodes, BRTreeNode* internal_nodes, Primitive* primitives,glm::vec3* collision_force, int* collided_vertex); //verlet intergration
// Default constructor: members are left for later initialization.
CUDA_Simulation::CUDA_Simulation()
{
}
// Destructor. NOTE(review): none of the device buffers allocated in
// init_cuda() are freed here — confirm whether that leak is intentional.
CUDA_Simulation::~CUDA_Simulation()
{
}
// Builds a simulation over `cloth` with spring topology `springs`:
// registers the cloth's OpenGL VBO for HIP interop, builds the per-vertex
// adjacent-face table, then uploads all simulation data to the GPU.
CUDA_Simulation::CUDA_Simulation(Obj& cloth, Springs& springs):readID(0), writeID(1),sim_cloth(&cloth),NUM_ADJFACE(sim_parameter.NUM_ADJFACE),cuda_spring(&springs)
{
    hipError_t cudaStatus = hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, sim_cloth->vbo.array_buffer, hipGraphicsMapFlagsWriteDiscard); //register vbo
    if (cudaStatus != hipSuccess)
        fprintf(stderr, "register failed\n");
    get_vertex_adjface(); // must run before init_cuda(), otherwise the adjacency data is empty
    init_cuda(); // upload simulation data to the GPU
}
// Runs one simulation step: maps the OpenGL VBO into the HIP address space,
// derives the pointer to its normal section, runs the Verlet kernels, then
// unmaps the VBO and swaps the double buffers.
void CUDA_Simulation::simulate()
{
    size_t num_bytes;
    hipError_t cudaStatus = hipGraphicsMapResources(1, &cuda_vbo_resource, 0);
    cudaStatus = hipGraphicsResourceGetMappedPointer((void **)&cuda_p_vertex, &num_bytes, cuda_vbo_resource);
    // Normals live after the vec4 positions and vec2 texcoords in the VBO.
    cuda_p_normal = (glm::vec3*)((float*)cuda_p_vertex + 4 * sim_cloth->uni_vertices.size() + 2 * sim_cloth->uni_tex.size());
    // Run the CUDA kernels (face normals + Verlet integration).
    verlet_cuda();
    cudaStatus = hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0);
    swap_buffer();
}
// Allocates all device buffers and uploads the cloth's initial state:
// double-buffered positions, the constant rest positions, collision forces,
// vertex/face index tables for normal computation, and debug buffers.
void CUDA_Simulation::init_cuda()
{
    size_t heap_size = 256 * 1024 * 1024; //set heap size, the default is 8M
    hipDeviceSetLimit(hipLimitMallocHeapSize, heap_size);
    // Upload sim_cloth vertex positions to the GPU.
    hipError_t cudaStatus;
    const unsigned int vertices_bytes = sizeof(glm::vec4) * sim_cloth->uni_vertices.size();
    cudaStatus = hipMalloc((void**)&const_cuda_pos, vertices_bytes); // cloth vertices (const)
    cudaStatus = hipMalloc((void**)&X[0], vertices_bytes); // cloth vertices
    cudaStatus = hipMalloc((void**)&X[1], vertices_bytes); // cloth vertices
    cudaStatus = hipMalloc((void**)&X_last[0], vertices_bytes); // cloth old vertices
    cudaStatus = hipMalloc((void**)&X_last[1], vertices_bytes); // cloth old vertices
    cudaStatus = hipMalloc((void**)&collision_force, sizeof(glm::vec3) * sim_cloth->uni_vertices.size()); //collision response force
    hipMemset(collision_force, 0, sizeof(glm::vec3) * sim_cloth->uni_vertices.size()); //initilize to 0
    // Wire up the double-buffer read/write pointers.
    X_in = X[readID];
    X_out = X[writeID];
    X_last_in = X_last[readID];
    X_last_out = X_last[writeID];
    cudaStatus = hipMemcpy(const_cuda_pos, &sim_cloth->uni_vertices[0], vertices_bytes, hipMemcpyHostToDevice);
    cudaStatus = hipMemcpy(X[0], &sim_cloth->uni_vertices[0], vertices_bytes, hipMemcpyHostToDevice);
    cudaStatus = hipMemcpy(X_last[0], &sim_cloth->uni_vertices[0], vertices_bytes, hipMemcpyHostToDevice);
    // Data needed for normal computation: per-vertex adjacent-face indices,
    // per-face vertex indices, and the full vertex index list (even though
    // OpenGL already holds it).
    const unsigned int vertices_index_bytes = sizeof(unsigned int) * sim_cloth->vertex_index.size(); // vertex indices
    cudaStatus = hipMalloc((void**)&cuda_vertex_index, vertices_index_bytes);
    cudaStatus = hipMemcpy(cuda_vertex_index, &sim_cloth->vertex_index[0], vertices_index_bytes, hipMemcpyHostToDevice);
    const unsigned int face_normal_bytes = sizeof(glm::vec3) * sim_cloth->faces.size(); // face normals
    cudaStatus = hipMalloc((void**)&cuda_face_normal, face_normal_bytes);
    const unsigned int vertex_adjface_bytes = sizeof(unsigned int) * vertex_adjface.size(); // per-vertex adjacent face indices
    cudaStatus = hipMalloc((void**)&cuda_vertex_adjface, vertex_adjface_bytes);
    cudaStatus = hipMemcpy(cuda_vertex_adjface, &vertex_adjface[0], vertex_adjface_bytes, hipMemcpyHostToDevice);
    // Spring info (two rings of neighbour indices) is already on the GPU.
    cuda_neigh1 = cuda_spring->cuda_neigh1;
    cuda_neigh2 = cuda_spring->cuda_neigh2;
    // Debug buffers: per-vertex collision flags mirrored on the host.
    hipMalloc((void**)&collided_vertex, sizeof(int)*sim_cloth->uni_vertices.size());
    hipMemset(collided_vertex, 0, sizeof(int)*sim_cloth->uni_vertices.size());
    cpu_collided_veretx.resize(sim_cloth->uni_vertices.size());
    updated_vertex.resize(sim_cloth->uni_vertices.size());
}
void CUDA_Simulation::get_vertex_adjface()
{
vector<vector<unsigned int>> adjaceny(sim_cloth->uni_vertices.size());
for(int i=0;i<sim_cloth->faces.size();i++)
{
unsigned int f[3];
for(int j=0;j<3;j++)
{
f[j] = sim_cloth->faces[i].vertex_index[j];
adjaceny[f[j]].push_back(i);
}
}
//test
/*for(int i=0;i<10;i++)
{
for(int j=0;j<adjaceny[i].size();j++)
cout << adjaceny[i][j] << " ";
cout << endl;
}
*/
vertex_adjface.resize(sim_cloth->uni_vertices.size()*NUM_ADJFACE);
for(int i=0;i<adjaceny.size();i++)
{
int j;
for(j=0;j<adjaceny[i].size() && j<NUM_ADJFACE;j++)
{
vertex_adjface[i*NUM_ADJFACE+j] = adjaceny[i][j];
}
if(NUM_ADJFACE>adjaceny[i].size())
vertex_adjface[i*NUM_ADJFACE+j] = UINT_MAX; //Sentinel
}
}
// Launches the two per-step kernels: first recompute per-face normals from
// the current positions, then run Verlet integration (springs, vertex
// normals, BVH collision response) writing into the output buffers.
void CUDA_Simulation::verlet_cuda()
{
    hipError_t cudaStatus;
    // One thread per face for the normal pass.
    unsigned int numThreads0, numBlocks0;
    computeGridSize(sim_cloth->faces.size(), 512, numBlocks0, numThreads0);
    unsigned int cloth_index_size = sim_cloth->vertex_index.size();
    hipLaunchKernelGGL(( get_face_normal) , dim3(numBlocks0), dim3(numThreads0) , 0, 0, X_in, cuda_vertex_index, cloth_index_size, cuda_face_normal);
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess)
        fprintf(stderr, "normal hipDeviceSynchronize returned error code %d after launching addKernel!\n%s\n", cudaStatus, hipGetErrorString(cudaStatus));
    // One thread per vertex for the integration pass.
    unsigned int numThreads, numBlocks;
    unsigned int numParticles = sim_cloth->uni_vertices.size();
    computeGridSize(numParticles, 512, numBlocks, numThreads);
    hipLaunchKernelGGL(( verlet) , dim3(numBlocks), dim3(numThreads) , 0, 0, cuda_p_vertex, X_in, X_last_in, X_out, X_last_out,const_cuda_pos,
        cuda_neigh1,cuda_neigh2,
        cuda_p_normal,cuda_vertex_adjface,cuda_face_normal,
        numParticles,
        d_leaf_nodes,d_internal_nodes,d_primitives, collision_force,
        collided_vertex);
    // stop the CPU until the kernel has been executed
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "verlet hipDeviceSynchronize returned error code %d after launching addKernel!\n%s\n",
            cudaStatus, hipGetErrorString(cudaStatus));
        exit(-1);
    }
    // Debug path (disabled): copy collision flags / positions back and print them.
    //hipMemcpy(&cpu_collided_veretx[0],collided_vertex,sizeof(int)*numParticles, hipMemcpyDeviceToHost);
    //hipMemcpy(&updated_vertex[0], cuda_p_vertex,sizeof(glm::vec4)*numParticles, hipMemcpyDeviceToHost);
    //cout << "*****collided veretx index************" << endl;
    //for (int i = 0; i < cpu_collided_veretx.size(); i++)
    //{
    //	if (cpu_collided_veretx[i] == 1)
    //		cout << i << " ";
    //}
    //cout << endl;
}
// Chooses a launch configuration for n work items: the block size is
// min(blockSize, n) and the grid has enough blocks to cover every item
// (ceiling division).
void CUDA_Simulation::computeGridSize(unsigned int n, unsigned int blockSize, unsigned int &numBlocks, unsigned int &numThreads)
{
    numThreads = min(blockSize, n);
    if (n % numThreads == 0)
        numBlocks = n / numThreads;
    else
        numBlocks = n / numThreads + 1;
}
// Exchanges the read/write roles of the double-buffered position arrays and
// refreshes the cached in/out pointers accordingly.
void CUDA_Simulation::swap_buffer()
{
    const int previousRead = readID;
    readID = writeID;
    writeID = previousRead;
    X_in = X[readID];
    X_out = X[writeID];
    X_last_in = X_last[readID];
    X_last_out = X_last[writeID];
}
// Borrows the device-side BVH pointers (leaf/internal nodes and primitives)
// used by the Verlet kernel for collision detection. `bvh` retains ownership.
void CUDA_Simulation::add_bvh(BVHAccel& bvh)
{
    d_leaf_nodes = bvh.d_leaf_nodes;
    d_internal_nodes = bvh.d_internal_nodes;
    d_primitives = bvh.d_primitives;
}
// Debug rendering (immediate-mode OpenGL): draws the cloth wireframe from
// the host-side `updated_vertex` copy, then overlays every vertex flagged in
// `cpu_collided_veretx` as a large red point. Requires the debug copies in
// verlet_cuda() to be enabled, otherwise both arrays are stale.
// (The function's closing brace is on the following line and is unchanged.)
void CUDA_Simulation::draw_collided_vertex()
{
    // Draw the cloth outline first.
    for (int i = 0; i < sim_cloth->faces.size(); i++)
    {
        glm::vec4 ver[3];
        glm::vec3 normal[3];
        for (int j = 0; j < 3; j++)
        {
            ver[j] = updated_vertex[sim_cloth->faces[i].vertex_index[j]];
        }
        glPointSize(1.0);
        glBegin(GL_MODE);
        glColor3f(1.0, 1.0,1.0);
        for (int j = 0; j < 3; j++)
        {
            glVertex3f(ver[j].x, ver[j].y, ver[j].z);
        }
        glEnd();
    }
    // Highlight collided vertices in red.
    for (int i = 0; i < cpu_collided_veretx.size(); i++)
    {
        glm::vec4 v = updated_vertex[i];
        if (cpu_collided_veretx[i] == 1)
        {
            glPointSize(10.0);
            glBegin(GL_POINTS);
            glColor3f(1.0, 0, 0);
            glVertex3f(v.x, v.y, v.z);
            glEnd();
        }
    }
} | 5c04721d1a0d1a2d8df491971338d7c9a630dab6.cu |
#include "cuda_simulation.h"
#include "spring.h"
#include "load_obj.h"
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <iostream>
using namespace std;
extern GLenum GL_MODE;
__global__ void get_face_normal(glm::vec4* g_pos_in, unsigned int* cloth_index, const unsigned int cloth_index_size, glm::vec3* cloth_face); //update cloth face normal
__global__ void verlet(glm::vec4* pos_vbo, glm::vec4 * g_pos_in, glm::vec4 * g_pos_old_in, glm::vec4 * g_pos_out, glm::vec4 * g_pos_old_out,glm::vec4* const_pos,
unsigned int* neigh1, unsigned int* neigh2,
glm::vec3* p_normal, unsigned int* vertex_adjface, glm::vec3* face_normal,
const unsigned int NUM_VERTICES,
BRTreeNode* leaf_nodes, BRTreeNode* internal_nodes, Primitive* primitives,glm::vec3* collision_force, int* collided_vertex); //verlet intergration
// Default constructor: members are left for later initialization.
CUDA_Simulation::CUDA_Simulation()
{
}
// Destructor. NOTE(review): none of the device buffers allocated in
// init_cuda() are freed here — confirm whether that leak is intentional.
CUDA_Simulation::~CUDA_Simulation()
{
}
// Builds a simulation over `cloth` with spring topology `springs`:
// registers the cloth's OpenGL VBO for CUDA interop, builds the per-vertex
// adjacent-face table, then uploads all simulation data to the GPU.
CUDA_Simulation::CUDA_Simulation(Obj& cloth, Springs& springs):readID(0), writeID(1),sim_cloth(&cloth),NUM_ADJFACE(sim_parameter.NUM_ADJFACE),cuda_spring(&springs)
{
    cudaError_t cudaStatus = cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, sim_cloth->vbo.array_buffer, cudaGraphicsMapFlagsWriteDiscard); //register vbo
    if (cudaStatus != cudaSuccess)
        fprintf(stderr, "register failed\n");
    get_vertex_adjface(); // must run before init_cuda(), otherwise the adjacency data is empty
    init_cuda(); // upload simulation data to the GPU
}
// Runs one simulation step: maps the OpenGL VBO into the CUDA address space,
// derives the pointer to its normal section, runs the Verlet kernels, then
// unmaps the VBO and swaps the double buffers.
void CUDA_Simulation::simulate()
{
    size_t num_bytes;
    cudaError_t cudaStatus = cudaGraphicsMapResources(1, &cuda_vbo_resource, 0);
    cudaStatus = cudaGraphicsResourceGetMappedPointer((void **)&cuda_p_vertex, &num_bytes, cuda_vbo_resource);
    // Normals live after the vec4 positions and vec2 texcoords in the VBO.
    cuda_p_normal = (glm::vec3*)((float*)cuda_p_vertex + 4 * sim_cloth->uni_vertices.size() + 2 * sim_cloth->uni_tex.size());
    // Run the CUDA kernels (face normals + Verlet integration).
    verlet_cuda();
    cudaStatus = cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0);
    swap_buffer();
}
// Allocates all device buffers and uploads the cloth's initial state:
// double-buffered positions, the constant rest positions, collision forces,
// vertex/face index tables for normal computation, and debug buffers.
void CUDA_Simulation::init_cuda()
{
    size_t heap_size = 256 * 1024 * 1024; //set heap size, the default is 8M
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, heap_size);
    // Upload sim_cloth vertex positions to the GPU.
    cudaError_t cudaStatus;
    const unsigned int vertices_bytes = sizeof(glm::vec4) * sim_cloth->uni_vertices.size();
    cudaStatus = cudaMalloc((void**)&const_cuda_pos, vertices_bytes); // cloth vertices (const)
    cudaStatus = cudaMalloc((void**)&X[0], vertices_bytes); // cloth vertices
    cudaStatus = cudaMalloc((void**)&X[1], vertices_bytes); // cloth vertices
    cudaStatus = cudaMalloc((void**)&X_last[0], vertices_bytes); // cloth old vertices
    cudaStatus = cudaMalloc((void**)&X_last[1], vertices_bytes); // cloth old vertices
    cudaStatus = cudaMalloc((void**)&collision_force, sizeof(glm::vec3) * sim_cloth->uni_vertices.size()); //collision response force
    cudaMemset(collision_force, 0, sizeof(glm::vec3) * sim_cloth->uni_vertices.size()); //initilize to 0
    // Wire up the double-buffer read/write pointers.
    X_in = X[readID];
    X_out = X[writeID];
    X_last_in = X_last[readID];
    X_last_out = X_last[writeID];
    cudaStatus = cudaMemcpy(const_cuda_pos, &sim_cloth->uni_vertices[0], vertices_bytes, cudaMemcpyHostToDevice);
    cudaStatus = cudaMemcpy(X[0], &sim_cloth->uni_vertices[0], vertices_bytes, cudaMemcpyHostToDevice);
    cudaStatus = cudaMemcpy(X_last[0], &sim_cloth->uni_vertices[0], vertices_bytes, cudaMemcpyHostToDevice);
    // Data needed for normal computation: per-vertex adjacent-face indices,
    // per-face vertex indices, and the full vertex index list (even though
    // OpenGL already holds it).
    const unsigned int vertices_index_bytes = sizeof(unsigned int) * sim_cloth->vertex_index.size(); // vertex indices
    cudaStatus = cudaMalloc((void**)&cuda_vertex_index, vertices_index_bytes);
    cudaStatus = cudaMemcpy(cuda_vertex_index, &sim_cloth->vertex_index[0], vertices_index_bytes, cudaMemcpyHostToDevice);
    const unsigned int face_normal_bytes = sizeof(glm::vec3) * sim_cloth->faces.size(); // face normals
    cudaStatus = cudaMalloc((void**)&cuda_face_normal, face_normal_bytes);
    const unsigned int vertex_adjface_bytes = sizeof(unsigned int) * vertex_adjface.size(); // per-vertex adjacent face indices
    cudaStatus = cudaMalloc((void**)&cuda_vertex_adjface, vertex_adjface_bytes);
    cudaStatus = cudaMemcpy(cuda_vertex_adjface, &vertex_adjface[0], vertex_adjface_bytes, cudaMemcpyHostToDevice);
    // Spring info (two rings of neighbour indices) is already on the GPU.
    cuda_neigh1 = cuda_spring->cuda_neigh1;
    cuda_neigh2 = cuda_spring->cuda_neigh2;
    // Debug buffers: per-vertex collision flags mirrored on the host.
    cudaMalloc((void**)&collided_vertex, sizeof(int)*sim_cloth->uni_vertices.size());
    cudaMemset(collided_vertex, 0, sizeof(int)*sim_cloth->uni_vertices.size());
    cpu_collided_veretx.resize(sim_cloth->uni_vertices.size());
    updated_vertex.resize(sim_cloth->uni_vertices.size());
}
void CUDA_Simulation::get_vertex_adjface()
{
vector<vector<unsigned int>> adjaceny(sim_cloth->uni_vertices.size());
for(int i=0;i<sim_cloth->faces.size();i++)
{
unsigned int f[3];
for(int j=0;j<3;j++)
{
f[j] = sim_cloth->faces[i].vertex_index[j];
adjaceny[f[j]].push_back(i);
}
}
//test
/*for(int i=0;i<10;i++)
{
for(int j=0;j<adjaceny[i].size();j++)
cout << adjaceny[i][j] << " ";
cout << endl;
}
*/
vertex_adjface.resize(sim_cloth->uni_vertices.size()*NUM_ADJFACE);
for(int i=0;i<adjaceny.size();i++)
{
int j;
for(j=0;j<adjaceny[i].size() && j<NUM_ADJFACE;j++)
{
vertex_adjface[i*NUM_ADJFACE+j] = adjaceny[i][j];
}
if(NUM_ADJFACE>adjaceny[i].size())
vertex_adjface[i*NUM_ADJFACE+j] = UINT_MAX; //Sentinel
}
}
// Launches the two per-step kernels: first recompute per-face normals from
// the current positions, then run Verlet integration (springs, vertex
// normals, BVH collision response) writing into the output buffers.
void CUDA_Simulation::verlet_cuda()
{
    cudaError_t cudaStatus;
    // One thread per face for the normal pass.
    unsigned int numThreads0, numBlocks0;
    computeGridSize(sim_cloth->faces.size(), 512, numBlocks0, numThreads0);
    unsigned int cloth_index_size = sim_cloth->vertex_index.size();
    get_face_normal <<<numBlocks0, numThreads0 >>>(X_in, cuda_vertex_index, cloth_index_size, cuda_face_normal);
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
        fprintf(stderr, "normal cudaDeviceSynchronize returned error code %d after launching addKernel!\n%s\n", cudaStatus, cudaGetErrorString(cudaStatus));
    // One thread per vertex for the integration pass.
    unsigned int numThreads, numBlocks;
    unsigned int numParticles = sim_cloth->uni_vertices.size();
    computeGridSize(numParticles, 512, numBlocks, numThreads);
    verlet <<< numBlocks, numThreads >>>(cuda_p_vertex, X_in, X_last_in, X_out, X_last_out,const_cuda_pos,
        cuda_neigh1,cuda_neigh2,
        cuda_p_normal,cuda_vertex_adjface,cuda_face_normal,
        numParticles,
        d_leaf_nodes,d_internal_nodes,d_primitives, collision_force,
        collided_vertex);
    // stop the CPU until the kernel has been executed
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "verlet cudaDeviceSynchronize returned error code %d after launching addKernel!\n%s\n",
            cudaStatus, cudaGetErrorString(cudaStatus));
        exit(-1);
    }
    // Debug path (disabled): copy collision flags / positions back and print them.
    //cudaMemcpy(&cpu_collided_veretx[0],collided_vertex,sizeof(int)*numParticles, cudaMemcpyDeviceToHost);
    //cudaMemcpy(&updated_vertex[0], cuda_p_vertex,sizeof(glm::vec4)*numParticles, cudaMemcpyDeviceToHost);
    //cout << "*****collided veretx index************" << endl;
    //for (int i = 0; i < cpu_collided_veretx.size(); i++)
    //{
    //	if (cpu_collided_veretx[i] == 1)
    //		cout << i << " ";
    //}
    //cout << endl;
}
// Chooses a launch configuration for n work items: the block size is
// min(blockSize, n) and the grid has enough blocks to cover every item
// (ceiling division).
void CUDA_Simulation::computeGridSize(unsigned int n, unsigned int blockSize, unsigned int &numBlocks, unsigned int &numThreads)
{
    numThreads = min(blockSize, n);
    if (n % numThreads == 0)
        numBlocks = n / numThreads;
    else
        numBlocks = n / numThreads + 1;
}
// Exchanges the read/write roles of the double-buffered position arrays and
// refreshes the cached in/out pointers accordingly.
void CUDA_Simulation::swap_buffer()
{
    const int previousRead = readID;
    readID = writeID;
    writeID = previousRead;
    X_in = X[readID];
    X_out = X[writeID];
    X_last_in = X_last[readID];
    X_last_out = X_last[writeID];
}
void CUDA_Simulation::add_bvh(BVHAccel& bvh)
{
d_leaf_nodes = bvh.d_leaf_nodes;
d_internal_nodes = bvh.d_internal_nodes;
d_primitives = bvh.d_primitives;
}
void CUDA_Simulation::draw_collided_vertex()
{
//draw outline first
for (int i = 0; i < sim_cloth->faces.size(); i++)
{
glm::vec4 ver[3];
glm::vec3 normal[3];
for (int j = 0; j < 3; j++)
{
ver[j] = updated_vertex[sim_cloth->faces[i].vertex_index[j]];
}
glPointSize(1.0);
glBegin(GL_MODE);
glColor3f(1.0, 1.0,1.0);
for (int j = 0; j < 3; j++)
{
glVertex3f(ver[j].x, ver[j].y, ver[j].z);
}
glEnd();
}
for (int i = 0; i < cpu_collided_veretx.size(); i++)
{
glm::vec4 v = updated_vertex[i];
if (cpu_collided_veretx[i] == 1)
{
//draw it
glPointSize(10.0);
glBegin(GL_POINTS);
glColor3f(1.0, 0, 0);
glVertex3f(v.x, v.y, v.z);
glEnd();
}
}
} |
f20e69cb2714d6ff0cf622344195e4dcb9ff9fc7.hip | // !!! This is a file automatically generated by hipify!!!
//
// Author: Marko Atanasievski
//
// Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
__global__ void gpu_flatten_forward(const unsigned char* a_input, const ncnn::CudaMatInfo a_info, unsigned char* output) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
{
return;
}
const int input_index = channel * a_info.cstep * a_info.elemsize + row * a_info.w * a_info.elemsize + column * a_info.elemsize;
const int output_index = a_info.w * a_info.h * a_info.elemsize * channel + row * a_info.w * a_info.elemsize + column * a_info.elemsize;
memcpy((void*)(output + output_index), (void*)(a_input + input_index), a_info.elemsize);
}
namespace ncnn {
int flatten_cuda_forward(const unsigned char* bottom_blob, const ncnn::CudaMatInfo bottom_blob_info,
unsigned char* top_blob)
{
int thread_per_block_x = ((bottom_blob_info.w - 1) / 64 + 1) * 64;
if (thread_per_block_x > 128) thread_per_block_x = 128;
int thread_per_block_y = ((bottom_blob_info.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = bottom_blob_info.c;
const int total_number_of_columns = bottom_blob_info.w;
const int total_number_of_rows = bottom_blob_info.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
hipLaunchKernelGGL(( gpu_flatten_forward), dim3(grid_size), dim3(block_size), 0, 0, bottom_blob, bottom_blob_info, top_blob);
return 0;
}
} | f20e69cb2714d6ff0cf622344195e4dcb9ff9fc7.cu | //
// Author: Marko Atanasievski
//
// Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
__global__ void gpu_flatten_forward(const unsigned char* a_input, const ncnn::CudaMatInfo a_info, unsigned char* output) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
{
return;
}
const int input_index = channel * a_info.cstep * a_info.elemsize + row * a_info.w * a_info.elemsize + column * a_info.elemsize;
const int output_index = a_info.w * a_info.h * a_info.elemsize * channel + row * a_info.w * a_info.elemsize + column * a_info.elemsize;
memcpy((void*)(output + output_index), (void*)(a_input + input_index), a_info.elemsize);
}
namespace ncnn {
int flatten_cuda_forward(const unsigned char* bottom_blob, const ncnn::CudaMatInfo bottom_blob_info,
unsigned char* top_blob)
{
int thread_per_block_x = ((bottom_blob_info.w - 1) / 64 + 1) * 64;
if (thread_per_block_x > 128) thread_per_block_x = 128;
int thread_per_block_y = ((bottom_blob_info.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = bottom_blob_info.c;
const int total_number_of_columns = bottom_blob_info.w;
const int total_number_of_rows = bottom_blob_info.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
gpu_flatten_forward<<<grid_size, block_size>>>(bottom_blob, bottom_blob_info, top_blob);
return 0;
}
} |
231f04c4892ee9cb21bf8433e95c2a320e2d7c71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_math_namespace.h>
#include <optixu/optixu_vector_types.h>
#else
#include <optix_device.h>
#endif
#include "rend_lib.h"
#if (OPTIX_VERSION < 70000)
using namespace optix;
// Launch variables
rtDeclareVariable (uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable (uint2, launch_dim, rtLaunchDim, );
// Scene/Shading variables
rtDeclareVariable (float, invw, , );
rtDeclareVariable (float, invh, , );
rtDeclareVariable (int, flipv, , );
// Buffers
rtBuffer<float3,2> output_buffer;
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_init_func, , );
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_group_func, ,);
RT_PROGRAM void raygen()
{
// Compute the pixel coordinates
float2 d = make_float2 (static_cast<float>(launch_index.x) + 0.5f,
static_cast<float>(launch_index.y) + 0.5f);
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
// Setup the ShaderGlobals
sg.I = make_float3(0,0,1);
sg.N = make_float3(0,0,1);
sg.Ng = make_float3(0,0,1);
sg.P = make_float3(d.x, d.y, 0);
sg.u = d.x * invw;
sg.v = d.y * invh;
if (flipv)
sg.v = 1.f - sg.v;
sg.dudx = invw;
sg.dudy = 0;
sg.dvdx = 0;
sg.dvdy = invh;
sg.dPdu = make_float3(d.x, 0, 0);
sg.dPdv = make_float3(0, d.y, 0);
sg.dPdu = make_float3(1.f / invw, 0.f , 0.f);
sg.dPdv = make_float3(0.0f, 1.f / invh, 0.f);
sg.dPdx = make_float3(1.f, 0.f, 0.f);
sg.dPdy = make_float3(0.f, 1.f, 0.f);
sg.dPdz = make_float3(0.f, 0.f, 0.f);
sg.Ci = NULL;
sg.surfacearea = 0;
sg.backfacing = 0;
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Run the OSL group and init functions
osl_init_func (&sg, params);
osl_group_func(&sg, params);
float* output = (float*)params;
output_buffer[launch_index] = {output[1], output[2], output[3]};
}
#else //#if (OPTIX_VERSION < 70000)
#include "render_params.h"
#include <optix_device.h>
extern "C" {
__device__ __constant__ RenderParams render_params;
}
extern "C" __global__ void __miss__()
{
// do nothing
}
extern "C" __global__ void __closesthit__()
{
// do nothing
}
extern "C" __global__ void __anyhit__()
{
// do nothing
}
extern "C" __global__ void __raygen__()
{
uint3 launch_dims = optixGetLaunchDimensions();
uint3 launch_index = optixGetLaunchIndex();
void *p = reinterpret_cast<void *>(optixGetSbtDataPointer());
// Compute the pixel coordinates
float2 d = make_float2 (static_cast<float>(launch_index.x) + 0.5f,
static_cast<float>(launch_index.y) + 0.5f);
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
const float invw = render_params.invw;
const float invh = render_params.invh;
bool flipv = render_params.flipv;
float3* output_buffer = reinterpret_cast<float3 *>(render_params.output_buffer);
ShaderGlobals sg;
// Setup the ShaderGlobals
sg.I = make_float3(0,0,1);
sg.N = make_float3(0,0,1);
sg.Ng = make_float3(0,0,1);
sg.P = make_float3(d.x, d.y, 0);
sg.u = d.x * invw;
sg.v = d.y * invh;
if (flipv)
sg.v = 1.f - sg.v;
sg.dudx = invw;
sg.dudy = 0;
sg.dvdx = 0;
sg.dvdy = invh;
sg.dPdu = make_float3(d.x, 0, 0);
sg.dPdv = make_float3(0, d.y, 0);
sg.dPdu = make_float3(1.f / invw, 0.f , 0.f);
sg.dPdv = make_float3(0.0f, 1.f / invh, 0.f);
sg.dPdx = make_float3(1.f, 0.f, 0.f);
sg.dPdy = make_float3(0.f, 1.f, 0.f);
sg.dPdz = make_float3(0.f, 0.f, 0.f);
sg.Ci = NULL;
sg.surfacearea = 0;
sg.backfacing = 0;
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Run the OSL group and init functions
optixDirectCall<void, ShaderGlobals*, void *>(0u, &sg, params); // call osl_init_func
optixDirectCall<void, ShaderGlobals*, void *>(1u, &sg, params); // call osl_group_func
float* f_output = (float*)params;
int pixel = launch_index.y * launch_dims.x + launch_index.x;
output_buffer[pixel] = {f_output[1], f_output[2], f_output[3]};
}
// Because clang++ 9.0 seems to have trouble with some of the texturing "intrinsics"
// let's do the texture look-ups in this file.
extern "C"
__device__ float4 osl_tex2DLookup(void *handle, float s, float t)
{
hipTextureObject_t texID = hipTextureObject_t(handle);
return tex2D<float4>(texID, s, t);
}
#endif //#if (OPTIX_VERSION < 70000)
| 231f04c4892ee9cb21bf8433e95c2a320e2d7c71.cu | // Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_math_namespace.h>
#include <optixu/optixu_vector_types.h>
#else
#include <optix_device.h>
#endif
#include "rend_lib.h"
#if (OPTIX_VERSION < 70000)
using namespace optix;
// Launch variables
rtDeclareVariable (uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable (uint2, launch_dim, rtLaunchDim, );
// Scene/Shading variables
rtDeclareVariable (float, invw, , );
rtDeclareVariable (float, invh, , );
rtDeclareVariable (int, flipv, , );
// Buffers
rtBuffer<float3,2> output_buffer;
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_init_func, , );
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_group_func, ,);
RT_PROGRAM void raygen()
{
// Compute the pixel coordinates
float2 d = make_float2 (static_cast<float>(launch_index.x) + 0.5f,
static_cast<float>(launch_index.y) + 0.5f);
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
// Setup the ShaderGlobals
sg.I = make_float3(0,0,1);
sg.N = make_float3(0,0,1);
sg.Ng = make_float3(0,0,1);
sg.P = make_float3(d.x, d.y, 0);
sg.u = d.x * invw;
sg.v = d.y * invh;
if (flipv)
sg.v = 1.f - sg.v;
sg.dudx = invw;
sg.dudy = 0;
sg.dvdx = 0;
sg.dvdy = invh;
sg.dPdu = make_float3(d.x, 0, 0);
sg.dPdv = make_float3(0, d.y, 0);
sg.dPdu = make_float3(1.f / invw, 0.f , 0.f);
sg.dPdv = make_float3(0.0f, 1.f / invh, 0.f);
sg.dPdx = make_float3(1.f, 0.f, 0.f);
sg.dPdy = make_float3(0.f, 1.f, 0.f);
sg.dPdz = make_float3(0.f, 0.f, 0.f);
sg.Ci = NULL;
sg.surfacearea = 0;
sg.backfacing = 0;
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Run the OSL group and init functions
osl_init_func (&sg, params);
osl_group_func(&sg, params);
float* output = (float*)params;
output_buffer[launch_index] = {output[1], output[2], output[3]};
}
#else //#if (OPTIX_VERSION < 70000)
#include "render_params.h"
#include <optix_device.h>
extern "C" {
__device__ __constant__ RenderParams render_params;
}
extern "C" __global__ void __miss__()
{
// do nothing
}
extern "C" __global__ void __closesthit__()
{
// do nothing
}
extern "C" __global__ void __anyhit__()
{
// do nothing
}
extern "C" __global__ void __raygen__()
{
uint3 launch_dims = optixGetLaunchDimensions();
uint3 launch_index = optixGetLaunchIndex();
void *p = reinterpret_cast<void *>(optixGetSbtDataPointer());
// Compute the pixel coordinates
float2 d = make_float2 (static_cast<float>(launch_index.x) + 0.5f,
static_cast<float>(launch_index.y) + 0.5f);
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
const float invw = render_params.invw;
const float invh = render_params.invh;
bool flipv = render_params.flipv;
float3* output_buffer = reinterpret_cast<float3 *>(render_params.output_buffer);
ShaderGlobals sg;
// Setup the ShaderGlobals
sg.I = make_float3(0,0,1);
sg.N = make_float3(0,0,1);
sg.Ng = make_float3(0,0,1);
sg.P = make_float3(d.x, d.y, 0);
sg.u = d.x * invw;
sg.v = d.y * invh;
if (flipv)
sg.v = 1.f - sg.v;
sg.dudx = invw;
sg.dudy = 0;
sg.dvdx = 0;
sg.dvdy = invh;
sg.dPdu = make_float3(d.x, 0, 0);
sg.dPdv = make_float3(0, d.y, 0);
sg.dPdu = make_float3(1.f / invw, 0.f , 0.f);
sg.dPdv = make_float3(0.0f, 1.f / invh, 0.f);
sg.dPdx = make_float3(1.f, 0.f, 0.f);
sg.dPdy = make_float3(0.f, 1.f, 0.f);
sg.dPdz = make_float3(0.f, 0.f, 0.f);
sg.Ci = NULL;
sg.surfacearea = 0;
sg.backfacing = 0;
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Run the OSL group and init functions
optixDirectCall<void, ShaderGlobals*, void *>(0u, &sg, params); // call osl_init_func
optixDirectCall<void, ShaderGlobals*, void *>(1u, &sg, params); // call osl_group_func
float* f_output = (float*)params;
int pixel = launch_index.y * launch_dims.x + launch_index.x;
output_buffer[pixel] = {f_output[1], f_output[2], f_output[3]};
}
// Because clang++ 9.0 seems to have trouble with some of the texturing "intrinsics"
// let's do the texture look-ups in this file.
extern "C"
__device__ float4 osl_tex2DLookup(void *handle, float s, float t)
{
cudaTextureObject_t texID = cudaTextureObject_t(handle);
return tex2D<float4>(texID, s, t);
}
#endif //#if (OPTIX_VERSION < 70000)
|
7a2d68022218bc9228a0f982efc22f9418a43782.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Tue Aug 13 16:45:14 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloatComplex* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
static
__device__ void zsum_reduce( int n, int i, magmaFloatComplex* x )
{
__syncthreads();
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
//==============================================================================
__global__
void magma_clarfx_kernel( int m, magmaFloatComplex *v, magmaFloatComplex *tau,
magmaFloatComplex *c, int ldc, float *xnorm,
magmaFloatComplex *T, int it )
{
if ( !MAGMA_C_EQUAL(*tau, MAGMA_C_ZERO) ) {
const int i = threadIdx.x;
//magmaFloatComplex *dc = c + (blockIdx.x-it-1) * ldc;
magmaFloatComplex *dc = c + (blockIdx.x) * ldc;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
/* w := v' * C */
lsum = MAGMA_C_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_C_MUL( MAGMA_C_ONE, dc[j] );
v[j] = MAGMA_C_ONE;
}
else
lsum += MAGMA_C_MUL( MAGMA_C_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
magmaFloatComplex z__1 = - MAGMA_C_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
float temp = MAGMA_C_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_C_CNJG(z__1);
}
}
}
//==============================================================================
__global__
void magma_ctrmv_kernel(const magmaFloatComplex *T, int ldt, magmaFloatComplex *t)
{
const int i = threadIdx.x;
T += i;
__shared__ magmaFloatComplex tlocal[ BLOCK_SIZE ];
magmaFloatComplex res = MAGMA_C_MAKE(0., 0.);
tlocal[i] = t[i];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[i] = res;
}
__global__
void magma_ctrmv_kernel2(const magmaFloatComplex *T, int ldt, magmaFloatComplex *t,
magmaFloatComplex *y, magmaFloatComplex *tau)
{
const int i = threadIdx.x;
T += blockIdx.x;
__shared__ magmaFloatComplex sum[ 128 ];
sum[i] = T[i*ldt]*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
__global__
void magma_ctrmv_tkernel(magmaFloatComplex *T, int ldt, magmaFloatComplex *t, magmaFloatComplex *y)
{
const int i = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ magmaFloatComplex sum[ 128 ];
sum[i] = MAGMA_C_CNJG(T[i])*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference with the
LAPACK's clarf routine.
*/
extern "C" void
magma_clarfx_gpu(magma_int_t m, magma_int_t n, magmaFloatComplex *v, magmaFloatComplex *tau,
magmaFloatComplex *c, magma_int_t ldc, float *xnorm,
magmaFloatComplex *T, magma_int_t i, magmaFloatComplex *work )
{
magma_int_t N = n + i + 1;
if (i==0)
hipLaunchKernelGGL(( magma_clarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, T+i*N, i);
else
hipLaunchKernelGGL(( magma_clarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_ctrmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, N, work, T+i*N, tau);
}
}
//==============================================================================
| 7a2d68022218bc9228a0f982efc22f9418a43782.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Tue Aug 13 16:45:14 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloatComplex* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
static
__device__ void zsum_reduce( int n, int i, magmaFloatComplex* x )
{
__syncthreads();
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
//==============================================================================
__global__
void magma_clarfx_kernel( int m, magmaFloatComplex *v, magmaFloatComplex *tau,
magmaFloatComplex *c, int ldc, float *xnorm,
magmaFloatComplex *T, int it )
{
if ( !MAGMA_C_EQUAL(*tau, MAGMA_C_ZERO) ) {
const int i = threadIdx.x;
//magmaFloatComplex *dc = c + (blockIdx.x-it-1) * ldc;
magmaFloatComplex *dc = c + (blockIdx.x) * ldc;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
/* w := v' * C */
lsum = MAGMA_C_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_C_MUL( MAGMA_C_ONE, dc[j] );
v[j] = MAGMA_C_ONE;
}
else
lsum += MAGMA_C_MUL( MAGMA_C_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
magmaFloatComplex z__1 = - MAGMA_C_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
float temp = MAGMA_C_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_C_CNJG(z__1);
}
}
}
//==============================================================================
__global__
void magma_ctrmv_kernel(const magmaFloatComplex *T, int ldt, magmaFloatComplex *t)
{
const int i = threadIdx.x;
T += i;
__shared__ magmaFloatComplex tlocal[ BLOCK_SIZE ];
magmaFloatComplex res = MAGMA_C_MAKE(0., 0.);
tlocal[i] = t[i];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[i] = res;
}
__global__
void magma_ctrmv_kernel2(const magmaFloatComplex *T, int ldt, magmaFloatComplex *t,
magmaFloatComplex *y, magmaFloatComplex *tau)
{
const int i = threadIdx.x;
T += blockIdx.x;
__shared__ magmaFloatComplex sum[ 128 ];
sum[i] = T[i*ldt]*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
__global__
void magma_ctrmv_tkernel(magmaFloatComplex *T, int ldt, magmaFloatComplex *t, magmaFloatComplex *y)
{
const int i = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ magmaFloatComplex sum[ 128 ];
sum[i] = MAGMA_C_CNJG(T[i])*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference with the
LAPACK's clarf routine.
*/
extern "C" void
magma_clarfx_gpu(magma_int_t m, magma_int_t n, magmaFloatComplex *v, magmaFloatComplex *tau,
magmaFloatComplex *c, magma_int_t ldc, float *xnorm,
magmaFloatComplex *T, magma_int_t i, magmaFloatComplex *work )
{
magma_int_t N = n + i + 1;
if (i==0)
magma_clarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, T+i*N, i);
else
magma_clarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_ctrmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
magma_ctrmv_kernel2<<< i, i, 0, magma_stream >>>( T, N, work, T+i*N, tau);
}
}
//==============================================================================
|
a5982a778b3ac646e4d965a5ef6055d2912e8fd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void FindClosestGPU(float3* points, int* indices, int count)
{
if (count <= 1) return;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < count)
{
float3 thisPoint = points[idx]; // every thread takes its own point
float smallestDistSoFar = 3.40282e38f; // almost the biggest possible floating point value
int smallestIdxSoFar = -1;
// run through the list of all other points
for (int i = 0; i < count; i++)
{
if (i == idx) continue;
float dist = (thisPoint.x - points[i].x)*(thisPoint.x - points[i].x);
dist += (thisPoint.y - points[i].y)*(thisPoint.y - points[i].y);
dist += (thisPoint.z - points[i].z)*(thisPoint.z - points[i].z);
if (dist < smallestDistSoFar)
{
smallestDistSoFar = dist;
smallestIdxSoFar = i;
}
}
indices[idx] = smallestIdxSoFar;
}
} | a5982a778b3ac646e4d965a5ef6055d2912e8fd0.cu | #include "includes.h"
__global__ void FindClosestGPU(float3* points, int* indices, int count)
{
if (count <= 1) return;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < count)
{
float3 thisPoint = points[idx]; // every thread takes its own point
float smallestDistSoFar = 3.40282e38f; // almost the biggest possible floating point value
int smallestIdxSoFar = -1;
// run through the list of all other points
for (int i = 0; i < count; i++)
{
if (i == idx) continue;
float dist = (thisPoint.x - points[i].x)*(thisPoint.x - points[i].x);
dist += (thisPoint.y - points[i].y)*(thisPoint.y - points[i].y);
dist += (thisPoint.z - points[i].z)*(thisPoint.z - points[i].z);
if (dist < smallestDistSoFar)
{
smallestDistSoFar = dist;
smallestIdxSoFar = i;
}
}
indices[idx] = smallestIdxSoFar;
}
} |
ceefc820e8c4737af8c2b98296a3290e04e28e3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lbfgs.h"
#include <iostream>
#include <cstdlib>
#include <cmath>
using namespace std;
namespace gpu_rosenbrock_d
{
__global__ void kernelF(const float *d_x, float *d_y)
{
const float &x0 = d_x[0];
const float &x1 = d_x[1];
// f = (1-x0)^2 + 100 (x1-x0^2)^2
const float a = (1.0 - x0);
const float b = (x1 - x0 * x0) ;
*d_y = (a*a) + 100.0f * (b*b);
}
__global__ void kernelGradf(const float *d_x, float *d_grad)
{
const float x0 = d_x[0];
const float x1 = d_x[1];
// df/dx0 = -2 (1-x0) - 400 (x1-x0^2) x0
// df/dx1 = 200 (x1 - x0^2)
d_grad[0] = -2.0f * (1.0f - x0) - 400.0f * x0 * (x1 - x0*x0);
d_grad[1] = 200.0f * (x1 - x0*x0);
}
}
class gpu_rosenbrock : public cost_function
{
public:
gpu_rosenbrock()
: cost_function(2) {}
void f(const float *d_x, float *d_y)
{
hipLaunchKernelGGL(( gpu_rosenbrock_d::kernelF), dim3(1), dim3(1), 0, 0, d_x, d_y);
}
void gradf(const float *d_x, float *d_grad)
{
hipLaunchKernelGGL(( gpu_rosenbrock_d::kernelGradf), dim3(1), dim3(1), 0, 0, d_x, d_grad);
}
void f_gradf(const float *d_x, float *d_f, float *d_grad)
{
f(d_x, d_f);
gradf(d_x, d_grad);
}
};
bool test(floatdouble x0, floatdouble x1, floatdouble epsilon)
{
float xstart[] = { float(x0), float(x1) };
float *d_x;
hipMalloc((void**)&d_x, 2 * sizeof(float));
hipMemcpy(d_x, xstart, 2 * sizeof(float), hipMemcpyHostToDevice);
gpu_rosenbrock rcf;
lbfgs minimizer(rcf);
minimizer.setGradientEpsilon(1e-3f);
lbfgs::status stat = minimizer.minimize(d_x);
hipMemcpy(xstart, d_x, 2 * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_x);
floatdouble e0 = std::abs(xstart[0] - 1.0f);
floatdouble e1 = std::abs(xstart[1] - 1.0f);
if (e0 > epsilon || e1 > epsilon)
{
cerr << "Ended because: " << minimizer.statusToString(stat).c_str() << endl;
cerr << "Starting point (" << x0 << ", " << x1 << ")" << endl;
// cerr << "x = " << xstart[0] << ", err(x) = " << e0 << endl;
// cerr << "y = " << xstart[1] << ", err(y) = " << e1 << endl;
// cerr << "Max. allowed error: " << epsilon << endl;
return false;
}
return true;
}
int main (int argc, char const *argv[])
{
for (int i = -4; i < 5; ++i)
{
for (int j = -4; j < 5; ++j)
{
if (!test(floatdouble(i), floatdouble(j), 1e-2f))
exit(EXIT_FAILURE);
}
}
cout << "Tests successful." << endl;
return EXIT_SUCCESS;
}
| ceefc820e8c4737af8c2b98296a3290e04e28e3e.cu | #include "lbfgs.h"
#include <iostream>
#include <cstdlib>
#include <cmath>
using namespace std;
namespace gpu_rosenbrock_d
{
__global__ void kernelF(const float *d_x, float *d_y)
{
const float &x0 = d_x[0];
const float &x1 = d_x[1];
// f = (1-x0)^2 + 100 (x1-x0^2)^2
const float a = (1.0 - x0);
const float b = (x1 - x0 * x0) ;
*d_y = (a*a) + 100.0f * (b*b);
}
__global__ void kernelGradf(const float *d_x, float *d_grad)
{
const float x0 = d_x[0];
const float x1 = d_x[1];
// df/dx0 = -2 (1-x0) - 400 (x1-x0^2) x0
// df/dx1 = 200 (x1 - x0^2)
d_grad[0] = -2.0f * (1.0f - x0) - 400.0f * x0 * (x1 - x0*x0);
d_grad[1] = 200.0f * (x1 - x0*x0);
}
}
class gpu_rosenbrock : public cost_function
{
public:
gpu_rosenbrock()
: cost_function(2) {}
void f(const float *d_x, float *d_y)
{
gpu_rosenbrock_d::kernelF<<<1, 1>>>(d_x, d_y);
}
void gradf(const float *d_x, float *d_grad)
{
gpu_rosenbrock_d::kernelGradf<<<1, 1>>>(d_x, d_grad);
}
void f_gradf(const float *d_x, float *d_f, float *d_grad)
{
f(d_x, d_f);
gradf(d_x, d_grad);
}
};
bool test(floatdouble x0, floatdouble x1, floatdouble epsilon)
{
float xstart[] = { float(x0), float(x1) };
float *d_x;
cudaMalloc((void**)&d_x, 2 * sizeof(float));
cudaMemcpy(d_x, xstart, 2 * sizeof(float), cudaMemcpyHostToDevice);
gpu_rosenbrock rcf;
lbfgs minimizer(rcf);
minimizer.setGradientEpsilon(1e-3f);
lbfgs::status stat = minimizer.minimize(d_x);
cudaMemcpy(xstart, d_x, 2 * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
floatdouble e0 = std::abs(xstart[0] - 1.0f);
floatdouble e1 = std::abs(xstart[1] - 1.0f);
if (e0 > epsilon || e1 > epsilon)
{
cerr << "Ended because: " << minimizer.statusToString(stat).c_str() << endl;
cerr << "Starting point (" << x0 << ", " << x1 << ")" << endl;
// cerr << "x = " << xstart[0] << ", err(x) = " << e0 << endl;
// cerr << "y = " << xstart[1] << ", err(y) = " << e1 << endl;
// cerr << "Max. allowed error: " << epsilon << endl;
return false;
}
return true;
}
int main (int argc, char const *argv[])
{
for (int i = -4; i < 5; ++i)
{
for (int j = -4; j < 5; ++j)
{
if (!test(floatdouble(i), floatdouble(j), 1e-2f))
exit(EXIT_FAILURE);
}
}
cout << "Tests successful." << endl;
return EXIT_SUCCESS;
}
|
c688dfd1cea990ec77fceeb73bdc3acad56773cf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_column(data_type{type_id::STRING});
CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_uvector<size_type> output_offsets(strings_count + 1, stream);
auto d_output_offsets = output_offsets.data();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__(size_type idx) {
size_type bytes = 0;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid()) return 0; // skip nulls
bytes += d_narep.size();
} else
bytes += d_strings.element<string_view>(idx).size_bytes();
if ((idx + 1) < d_strings.size()) bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
output_offsets.set_element_to_zero_async(0, stream);
// total size is the last entry
size_type const bytes = output_offsets.back_element(stream);
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, static_cast<int32_t>(bytes)};
CUDA_TRY(hipMemcpyAsync(offsets_view.data<int32_t>(),
new_offsets,
sizeof(new_offsets),
hipMemcpyHostToDevice,
stream.value()));
// build null mask
// only one entry so it is either all valid or all null
auto const null_count =
static_cast<size_type>(strings.null_count() == strings_count && !narep.is_valid());
auto null_mask = null_count
? cudf::detail::create_null_mask(1, cudf::mask_state::ALL_NULL, stream, mr)
: rmm::device_buffer{0, stream, mr};
auto chars_column = detail::create_chars_child_column(strings_count, bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx) {
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid())
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep.value());
} else {
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if ((idx + 1) < d_strings.size()) d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(1,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::join_strings(strings, separator, narep, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| c688dfd1cea990ec77fceeb73bdc3acad56773cf.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_column(data_type{type_id::STRING});
CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_uvector<size_type> output_offsets(strings_count + 1, stream);
auto d_output_offsets = output_offsets.data();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__(size_type idx) {
size_type bytes = 0;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid()) return 0; // skip nulls
bytes += d_narep.size();
} else
bytes += d_strings.element<string_view>(idx).size_bytes();
if ((idx + 1) < d_strings.size()) bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
output_offsets.set_element_to_zero_async(0, stream);
// total size is the last entry
size_type const bytes = output_offsets.back_element(stream);
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, static_cast<int32_t>(bytes)};
CUDA_TRY(cudaMemcpyAsync(offsets_view.data<int32_t>(),
new_offsets,
sizeof(new_offsets),
cudaMemcpyHostToDevice,
stream.value()));
// build null mask
// only one entry so it is either all valid or all null
auto const null_count =
static_cast<size_type>(strings.null_count() == strings_count && !narep.is_valid());
auto null_mask = null_count
? cudf::detail::create_null_mask(1, cudf::mask_state::ALL_NULL, stream, mr)
: rmm::device_buffer{0, stream, mr};
auto chars_column = detail::create_chars_child_column(strings_count, bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx) {
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid())
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep.value());
} else {
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if ((idx + 1) < d_strings.size()) d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(1,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::join_strings(strings, separator, narep, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
feb9dfbd09a973e1028174491647bcfe0f655374.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <pthread.h>
#include <sys/time.h>
struct timeval tp;
double getTime_usec() {
gettimeofday(&tp, NULL);
return static_cast<double>(tp.tv_sec) * 1E6
+ static_cast<double>(tp.tv_usec);
}
//#include "Queues/QueueJobs.cu"
void *main_IncomingJobsManager(void *p);
pthread_t start_IncomingJobsManager(Queue d_newJobs)
{
//This should do any initializing that the incoming jobs
// manager will need and then launch a thread running
// main_IncomingJobsManager( ), returning that thread
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
pthread_t thread1;
pthread_create( &thread1, &attr, main_IncomingJobsManager, (void*) d_newJobs);
pthread_attr_destroy(&attr);
return thread1;
}
void *moveToCuda(void *val, int size){
void *ret;
hipError_t e = hipMalloc(&ret, size);
if(e!=hipSuccess)printf("CUDA Malloc Error: %s in moveToCuda\n", hipGetErrorString (e));
cudaSafeMemcpy(ret, val, size,
hipMemcpyHostToDevice, stream_dataIn,
"in moveToCuda of IncomingJobsManager.cu");
return ret;
}
float *makeMatrix(){
int ROW = 32;
int COLUMN = 32;
int a=0, b=0;
float *stuff = (float *) malloc(2*(COLUMN * ROW * sizeof(float)));
for(a=0; a<ROW;a++)
{
for(b=0; b<COLUMN;b++)
{
stuff[a + b * ROW]=((float)rand())/((float) RAND_MAX);
stuff[a + b * ROW + ROW * COLUMN] = 0.0;
}
}
return stuff;
}
void *main_IncomingJobsManager(void *p)
{
//The thread should get job descriptions some how and Enqueue them
// into the queue in params
// --eventually this should get jobs from an external application
// but will probably just be hardcoded at first or a parameter
Queue d_newJobs = (Queue) p;
// Hard code for testing
int HC_JobType = 2; // hard code the job type for sleeps
int HC_JobID;
int HC_numThreads = 32;
int HC_jobs = NUMBER_OF_JOBS;
// int HC_matrixWidth = 32;
//int HC_matrixSize = HC_matrixWidth * HC_matrixWidth;
int size = sizeof(struct JobDescription);
printf("Starting IncomingJobs Manager\n");
void * d_sleep_time = moveToCuda(&SLEEP_TIME, sizeof(int));
int i;
for(i=0;i<HC_jobs;i++){
HC_JobID = i;
// launch queue jobs
// malloc the host structure
JobDescription *h_JobDescription = (JobDescription *) malloc(size);
// set the values to the host structure
h_JobDescription->JobType = HC_JobType;
h_JobDescription->JobID = HC_JobID;
h_JobDescription->params = d_sleep_time; //AddSleep
//h_JobDescription->params = moveToCuda(makeMatrix(), (2 * sizeof(float) * HC_matrixSize)); //Matrix
h_JobDescription->numThreads = HC_numThreads;
// enqueue jobs
EnqueueJob(h_JobDescription, d_newJobs);
//printf("Finished EnqueueJob # %d\n", HC_JobID);
// free the local memory
free(h_JobDescription);
}
printf("Finished Incoming Jobs Manager\n");
return 0;
}
| feb9dfbd09a973e1028174491647bcfe0f655374.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <pthread.h>
#include <sys/time.h>
struct timeval tp;
double getTime_usec() {
gettimeofday(&tp, NULL);
return static_cast<double>(tp.tv_sec) * 1E6
+ static_cast<double>(tp.tv_usec);
}
//#include "Queues/QueueJobs.cu"
void *main_IncomingJobsManager(void *p);
pthread_t start_IncomingJobsManager(Queue d_newJobs)
{
//This should do any initializing that the incoming jobs
// manager will need and then launch a thread running
// main_IncomingJobsManager( ), returning that thread
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
pthread_t thread1;
pthread_create( &thread1, &attr, main_IncomingJobsManager, (void*) d_newJobs);
pthread_attr_destroy(&attr);
return thread1;
}
void *moveToCuda(void *val, int size){
void *ret;
cudaError_t e = cudaMalloc(&ret, size);
if(e!=cudaSuccess)printf("CUDA Malloc Error: %s in moveToCuda\n", cudaGetErrorString (e));
cudaSafeMemcpy(ret, val, size,
cudaMemcpyHostToDevice, stream_dataIn,
"in moveToCuda of IncomingJobsManager.cu");
return ret;
}
float *makeMatrix(){
int ROW = 32;
int COLUMN = 32;
int a=0, b=0;
float *stuff = (float *) malloc(2*(COLUMN * ROW * sizeof(float)));
for(a=0; a<ROW;a++)
{
for(b=0; b<COLUMN;b++)
{
stuff[a + b * ROW]=((float)rand())/((float) RAND_MAX);
stuff[a + b * ROW + ROW * COLUMN] = 0.0;
}
}
return stuff;
}
void *main_IncomingJobsManager(void *p)
{
//The thread should get job descriptions some how and Enqueue them
// into the queue in params
// --eventually this should get jobs from an external application
// but will probably just be hardcoded at first or a parameter
Queue d_newJobs = (Queue) p;
// Hard code for testing
int HC_JobType = 2; // hard code the job type for sleeps
int HC_JobID;
int HC_numThreads = 32;
int HC_jobs = NUMBER_OF_JOBS;
// int HC_matrixWidth = 32;
//int HC_matrixSize = HC_matrixWidth * HC_matrixWidth;
int size = sizeof(struct JobDescription);
printf("Starting IncomingJobs Manager\n");
void * d_sleep_time = moveToCuda(&SLEEP_TIME, sizeof(int));
int i;
for(i=0;i<HC_jobs;i++){
HC_JobID = i;
// launch queue jobs
// malloc the host structure
JobDescription *h_JobDescription = (JobDescription *) malloc(size);
// set the values to the host structure
h_JobDescription->JobType = HC_JobType;
h_JobDescription->JobID = HC_JobID;
h_JobDescription->params = d_sleep_time; //AddSleep
//h_JobDescription->params = moveToCuda(makeMatrix(), (2 * sizeof(float) * HC_matrixSize)); //Matrix
h_JobDescription->numThreads = HC_numThreads;
// enqueue jobs
EnqueueJob(h_JobDescription, d_newJobs);
//printf("Finished EnqueueJob # %d\n", HC_JobID);
// free the local memory
free(h_JobDescription);
}
printf("Finished Incoming Jobs Manager\n");
return 0;
}
|
08e96aa51e4f0122e2d8b0a7c798934f63d3ac2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixExp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int cr = 1;
int cc = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixExp), dim3(gridBlock),dim3(threadBlock), 0, 0, a,c,cr,cc);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixExp), dim3(gridBlock),dim3(threadBlock), 0, 0, a,c,cr,cc);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixExp), dim3(gridBlock),dim3(threadBlock), 0, 0, a,c,cr,cc);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 08e96aa51e4f0122e2d8b0a7c798934f63d3ac2c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixExp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int cr = 1;
int cc = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixExp<<<gridBlock,threadBlock>>>(a,c,cr,cc);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixExp<<<gridBlock,threadBlock>>>(a,c,cr,cc);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixExp<<<gridBlock,threadBlock>>>(a,c,cr,cc);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5df25419f769389c8f9a51df4c831ae66d34c524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
/*!
* \file test_warp_tools.cu
* \brief CUDA kernels for testing warp-level primitives.
*/
#include "hoomd/WarpTools.cuh"
#include "test_warp_tools.cuh"
#ifdef __HIP_PLATFORM_HCC__
#define BLOCK_SIZE 64
#define MAX_TPP 64
#else
#define BLOCK_SIZE 32
#define MAX_TPP 32
#endif
namespace hoomd
{
namespace test
{
//! Performs an iterative warp reduction on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_reduce Output of the reduction at each step.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param reduce_idx Indexer for saving intermediate results of reduction.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and
* \a width entries per row. This sub-warp group then iterates through the data in the row,
* performing a reduction at each iteration. The result of the reduction is saved into \a d_reduce
* for each iteration. The total sum is also accumulated into \a d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpReduce. The
* reason for this is to emulate a use-case in HOOMD, namely the force accumulation using multiple
* threads per particle.
*/
template<int tpp>
__global__ void warp_reduce_kernel(const int* d_data,
int* d_reduce,
int* d_sum,
const unsigned int N,
const unsigned int width,
const Index2D reduce_idx)
{
// thread id in the global grid
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// row of data that this thread operates on
const unsigned int idx = tid / tpp;
// index of thread within the sub warp
const unsigned int cta_idx = threadIdx.x % tpp;
if (idx >= N)
return;
int sum(0), cntr(0);
unsigned int offset = cta_idx;
bool done = false;
while (!done)
{
// load in data
int thread_data;
if (offset < width)
{
thread_data = d_data[idx * width + offset];
}
else
{
thread_data = 0;
done = true;
}
offset += tpp;
// only scan if sub warp still has work to do
done = hoomd::detail::WarpScan<bool, tpp>().Broadcast(done, 0);
if (!done)
{
// scan the thread data
int sum_iter = hoomd::detail::WarpReduce<int, tpp>().Sum(thread_data);
// save reduce result for this iteration
if (cta_idx == 0)
d_reduce[reduce_idx(idx, cntr)] = sum_iter;
// accumulate total sum
sum += sum_iter;
++cntr;
}
}
// thread 0 writes out accumulated sum
if (cta_idx == 0)
{
d_sum[idx] = sum;
}
}
// Dispatch for warp reduction based on requested threads per particle.
/*!
* \param params Reduction parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from
* 1 to 64), and only executes the kernel for the number of threads that is equal to the value
* specified in \a params.
*/
template<int tpp> void warp_reduce_launcher(const reduce_params& params)
{
if (tpp == params.tpp)
{
dim3 grid((params.N * tpp + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipLaunchKernelGGL((warp_reduce_kernel<tpp>),
dim3(grid),
dim3(BLOCK_SIZE),
0,
0,
params.data,
params.reduce,
params.sum,
params.N,
params.width,
params.reduce_idx);
}
else
{
warp_reduce_launcher<tpp / 2>(params);
}
}
//! Terminates the recursive template.
template<> void warp_reduce_launcher<0>(const reduce_params& params) { }
/*!
* \params Scan parameters.
*
* The scan results are first memset to zero.
*/
void warp_reduce(const reduce_params& params)
{
hipMemset(params.reduce, 0, params.reduce_idx.getNumElements() * sizeof(int));
hipMemset(params.sum, 0, params.N * sizeof(int));
warp_reduce_launcher<MAX_TPP>(params);
}
//! Performs an iterative warp scan on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_scan Output of the scan at each step of sum.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param scan_idx Indexer for saving intermediate results of scan.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and
* \a width entries per row. This sub-warp group then iterates through the data in the row,
* performing an exclusive sum at each iteration. The result of the scan is saved into \a d_scan for
* each thread along with the aggregate at each iteration. The total sum is also accumulated into \a
* d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpScan. The reason
* for this is to emulate a use-case in HOOMD, namely the neighbor list generation using multiple
* threads per particle.
*/
template<int tpp>
__global__ void warp_scan_kernel(const int* d_data,
int* d_scan,
int* d_sum,
const unsigned int N,
const unsigned int width,
const Index3D scan_idx)
{
// thread id in the global grid
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// row of data that this thread operates on
const unsigned int idx = tid / tpp;
// index of thread within the sub warp
const unsigned int cta_idx = threadIdx.x % tpp;
if (idx >= N)
return;
int sum(0), cntr(0);
unsigned int offset = cta_idx;
bool done = false;
while (!done)
{
// load in data
int thread_data;
if (offset < width)
{
thread_data = d_data[idx * width + offset];
}
else
{
thread_data = 0;
done = true;
}
offset += tpp;
// only scan if sub warp still has work to do
done = hoomd::detail::WarpScan<bool, tpp>().Broadcast(done, 0);
if (!done)
{
// scan the thread data
int sum_iter(0);
hoomd::detail::WarpScan<int, tpp>().ExclusiveSum(thread_data, thread_data, sum_iter);
// save scan result for this iteration
d_scan[scan_idx(idx, cta_idx, cntr)] = thread_data;
if (cta_idx == 0)
d_scan[scan_idx(idx, tpp, cntr)] = sum_iter;
// accumulate total sum
sum += sum_iter;
++cntr;
}
}
// thread 0 writes out accumulated sum
if (cta_idx == 0)
{
d_sum[idx] = sum;
}
}
// Dispatch for warp scan based on requested threads per particle.
/*!
* \param params Scan parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from
* 1 to 64) and only executes the kernel for the number of threads that is equal to the value
* specified in \a params.
*/
template<int tpp> void warp_scan_launcher(const scan_params& params)
{
if (tpp == params.tpp)
{
dim3 grid((params.N * tpp + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipLaunchKernelGGL((warp_scan_kernel<tpp>),
dim3(grid),
dim3(BLOCK_SIZE),
0,
0,
params.data,
params.scan,
params.sum,
params.N,
params.width,
params.scan_idx);
}
else
{
warp_scan_launcher<tpp / 2>(params);
}
}
//! Terminates the recursive template.
template<> void warp_scan_launcher<0>(const scan_params& params) { }
/*!
* \params Scan parameters.
*
* The scan results are first memset to zero.
*/
void warp_scan(const scan_params& params)
{
hipMemset(params.scan, 0, params.scan_idx.getNumElements() * sizeof(int));
hipMemset(params.sum, 0, params.N * sizeof(int));
warp_scan_launcher<MAX_TPP>(params);
}
} // end namespace test
} // end namespace hoomd
| 5df25419f769389c8f9a51df4c831ae66d34c524.cu | // Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
/*!
* \file test_warp_tools.cu
* \brief CUDA kernels for testing warp-level primitives.
*/
#include "hoomd/WarpTools.cuh"
#include "test_warp_tools.cuh"
#ifdef __HIP_PLATFORM_HCC__
#define BLOCK_SIZE 64
#define MAX_TPP 64
#else
#define BLOCK_SIZE 32
#define MAX_TPP 32
#endif
namespace hoomd
{
namespace test
{
//! Performs an iterative warp reduction on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_reduce Output of the reduction at each step.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param reduce_idx Indexer for saving intermediate results of reduction.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and
* \a width entries per row. This sub-warp group then iterates through the data in the row,
* performing a reduction at each iteration. The result of the reduction is saved into \a d_reduce
* for each iteration. The total sum is also accumulated into \a d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpReduce. The
* reason for this is to emulate a use-case in HOOMD, namely the force accumulation using multiple
* threads per particle.
*/
template<int tpp>
__global__ void warp_reduce_kernel(const int* d_data,
                                   int* d_reduce,
                                   int* d_sum,
                                   const unsigned int N,
                                   const unsigned int width,
                                   const Index2D reduce_idx)
    {
    // thread id in the global grid
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // row of data that this thread operates on
    const unsigned int idx = tid / tpp;
    // index of thread within the sub warp
    const unsigned int cta_idx = threadIdx.x % tpp;
    // threads mapped past the last row have nothing to do
    if (idx >= N)
        return;
    int sum(0), cntr(0);
    // each lane starts at its own offset and strides by tpp through the row
    unsigned int offset = cta_idx;
    bool done = false;
    while (!done)
        {
        // load in data
        int thread_data;
        if (offset < width)
            {
            thread_data = d_data[idx * width + offset];
            }
        else
            {
            // past the end of the row: a zero leaves the reduction unchanged
            thread_data = 0;
            done = true;
            }
        offset += tpp;
        // only scan if sub warp still has work to do
        // (lane 0 has the smallest offsets, so it is the last lane to run out
        // of work; broadcasting its flag keeps the whole sub warp iterating)
        done = hoomd::detail::WarpScan<bool, tpp>().Broadcast(done, 0);
        if (!done)
            {
            // scan the thread data
            int sum_iter = hoomd::detail::WarpReduce<int, tpp>().Sum(thread_data);
            // save reduce result for this iteration
            if (cta_idx == 0)
                d_reduce[reduce_idx(idx, cntr)] = sum_iter;
            // accumulate total sum
            sum += sum_iter;
            ++cntr;
            }
        }
    // thread 0 writes out accumulated sum
    if (cta_idx == 0)
        {
        d_sum[idx] = sum;
        }
    }
// Dispatch for warp reduction based on requested threads per particle.
/*!
* \param params Reduction parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from
* 1 to 64), and only executes the kernel for the number of threads that is equal to the value
* specified in \a params.
*/
template<int tpp> void warp_reduce_launcher(const reduce_params& params)
    {
    if (tpp != params.tpp)
        {
        // not the requested width yet: keep halving the thread count
        warp_reduce_launcher<tpp / 2>(params);
        return;
        }
    // launch one sub-warp group of tpp threads per row of data
    const unsigned int total_threads = params.N * tpp;
    dim3 grid((total_threads + BLOCK_SIZE - 1) / BLOCK_SIZE);
    hipLaunchKernelGGL((warp_reduce_kernel<tpp>),
                       dim3(grid),
                       dim3(BLOCK_SIZE),
                       0,
                       0,
                       params.data,
                       params.reduce,
                       params.sum,
                       params.N,
                       params.width,
                       params.reduce_idx);
    }
//! Terminates the recursive template.
template<> void warp_reduce_launcher<0>(const reduce_params& params) { } // base case: no matching tpp, nothing to launch
/*!
* \params Scan parameters.
*
* The scan results are first memset to zero.
*/
void warp_reduce(const reduce_params& params)
    {
    // zero the outputs so that entries the kernel never writes are well defined
    const size_t reduce_bytes = params.reduce_idx.getNumElements() * sizeof(int);
    const size_t sum_bytes = params.N * sizeof(int);
    hipMemset(params.reduce, 0, reduce_bytes);
    hipMemset(params.sum, 0, sum_bytes);
    warp_reduce_launcher<MAX_TPP>(params);
    }
//! Performs an iterative warp scan on a data set using \a tpp threads per row.
/*!
* \param d_data Data to scan as a N x width matrix.
* \param d_scan Output of the scan at each step of sum.
* \param d_sum Total sum for each row of data.
* \param N Number of rows in data.
* \param width Number of entries to scan.
* \param scan_idx Indexer for saving intermediate results of scan.
* \tparam tpp Number of threads to use per row in \a d_data .
*
* The kernel is launched with \a tpp threads working per row in \a d_data, which has \a N rows and
* \a width entries per row. This sub-warp group then iterates through the data in the row,
* performing an exclusive sum at each iteration. The result of the scan is saved into \a d_scan for
* each thread along with the aggregate at each iteration. The total sum is also accumulated into \a
* d_sum.
*
* This test kernel is more complicated than the basic tests that CUB runs for WarpScan. The reason
* for this is to emulate a use-case in HOOMD, namely the neighbor list generation using multiple
* threads per particle.
*/
template<int tpp>
__global__ void warp_scan_kernel(const int* d_data,
                                 int* d_scan,
                                 int* d_sum,
                                 const unsigned int N,
                                 const unsigned int width,
                                 const Index3D scan_idx)
    {
    // thread id in the global grid
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // row of data that this thread operates on
    const unsigned int idx = tid / tpp;
    // index of thread within the sub warp
    const unsigned int cta_idx = threadIdx.x % tpp;
    // threads mapped past the last row have nothing to do
    if (idx >= N)
        return;
    int sum(0), cntr(0);
    // each lane starts at its own offset and strides by tpp through the row
    unsigned int offset = cta_idx;
    bool done = false;
    while (!done)
        {
        // load in data
        int thread_data;
        if (offset < width)
            {
            thread_data = d_data[idx * width + offset];
            }
        else
            {
            // past the end of the row: a zero leaves the exclusive sum unchanged
            thread_data = 0;
            done = true;
            }
        offset += tpp;
        // only scan if sub warp still has work to do
        // (lane 0 has the smallest offsets, so it is the last lane to run out
        // of work; broadcasting its flag keeps the whole sub warp iterating)
        done = hoomd::detail::WarpScan<bool, tpp>().Broadcast(done, 0);
        if (!done)
            {
            // scan the thread data
            int sum_iter(0);
            hoomd::detail::WarpScan<int, tpp>().ExclusiveSum(thread_data, thread_data, sum_iter);
            // save scan result for this iteration
            d_scan[scan_idx(idx, cta_idx, cntr)] = thread_data;
            // slot tpp of the middle index stores the iteration aggregate
            // (scan_idx presumably sized for tpp+1 entries -- see caller)
            if (cta_idx == 0)
                d_scan[scan_idx(idx, tpp, cntr)] = sum_iter;
            // accumulate total sum
            sum += sum_iter;
            ++cntr;
            }
        }
    // thread 0 writes out accumulated sum
    if (cta_idx == 0)
        {
        d_sum[idx] = sum;
        }
    }
// Dispatch for warp scan based on requested threads per particle.
/*!
* \param params Scan parameters.
* \tparam tpp Number of threads to try to launch.
*
* This recursive template compiles the kernel for all valid threads per particle (powers of 2 from
* 1 to 64) and only executes the kernel for the number of threads that is equal to the value
* specified in \a params.
*/
template<int tpp> void warp_scan_launcher(const scan_params& params)
    {
    if (tpp != params.tpp)
        {
        // not the requested width yet: keep halving the thread count
        warp_scan_launcher<tpp / 2>(params);
        return;
        }
    // launch one sub-warp group of tpp threads per row of data
    const unsigned int total_threads = params.N * tpp;
    dim3 grid((total_threads + BLOCK_SIZE - 1) / BLOCK_SIZE);
    hipLaunchKernelGGL((warp_scan_kernel<tpp>),
                       dim3(grid),
                       dim3(BLOCK_SIZE),
                       0,
                       0,
                       params.data,
                       params.scan,
                       params.sum,
                       params.N,
                       params.width,
                       params.scan_idx);
    }
//! Terminates the recursive template.
template<> void warp_scan_launcher<0>(const scan_params& params) { } // base case: no matching tpp, nothing to launch
/*!
* \params Scan parameters.
*
* The scan results are first memset to zero.
*/
void warp_scan(const scan_params& params)
    {
    // zero the outputs so that entries the kernel never writes are well defined
    const size_t scan_bytes = params.scan_idx.getNumElements() * sizeof(int);
    const size_t sum_bytes = params.N * sizeof(int);
    hipMemset(params.scan, 0, scan_bytes);
    hipMemset(params.sum, 0, sum_bytes);
    warp_scan_launcher<MAX_TPP>(params);
    }
} // end namespace test
} // end namespace hoomd
|
42e87ed7882e80acd09e53f48271c5ae033b7120.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Utilities and system includes
#include <helper_cuda.h>
#ifndef USE_TEXTURE_RGBA8UI
texture<float4, 2, hipReadModeElementType> inTex;
#else
texture<uchar4, 2, hipReadModeElementType> inTex;
#endif
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
    // limit from above first, then from below
    float capped = min(b, x);
    return max(a, capped);
}
__device__ int clamp(int x, int a, int b)
{
    // limit from above first, then from below
    int capped = min(b, x);
    return max(a, capped);
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
    // saturate each channel to [0, 255] before truncating to integer
    int ri = int(clamp(r, 0.0f, 255.0f));
    int gi = int(clamp(g, 0.0f, 255.0f));
    int bi = int(clamp(b, 0.0f, 255.0f));
    // pack channels as 0x00BBGGRR
    return (bi << 16) | (gi << 8) | ri;
}
// get pixel from 2D image, with clamping to border
__device__ uchar4 getPixel(int x, int y, int imgw, int imgh, uchar4* buffer)
{
    // clamp the coordinates so reads outside the image repeat the border pixel
    int cy = clamp(y, 0, imgh - 1);
    int cx = clamp(x, 0, imgw - 1);
    return buffer[cy * imgw + cx];
}
// blend the texture sample at (x, y) into the motion blur accumulation buffer
// Blends the current texture sample at (x, y) into the motion blur
// accumulation buffer as a weighted running average (history:new = 30:1).
__device__ void setPixel(int x, int y, int imgw, uchar4* buffer)
{
#ifndef USE_TEXTURE_RGBA8UI
    // float texture path: scale the normalized sample back to 8-bit channels
    float4 res = tex2D(inTex, x, y);
    uchar4 ucres = make_uchar4(res.x*255.0f, res.y*255.0f, res.z*255.0f, res.w*255.0f);
#else
    uchar4 ucres = tex2D(inTex, x, y);
#endif
    // weights of the running average: 30 parts history, 1 part new sample
    int old_c = 30;
    int new_c = 1;
    int total = old_c + new_c;
    // per channel: (new_c*sample + old_c*history)/total, written through the
    // 255-complement so the integer division rounds up instead of down
    buffer[y*imgw + x].x = 255 - (total*255 - new_c*ucres.x - old_c*buffer[y*imgw + x].x)/total;
    buffer[y*imgw + x].y = 255 - (total*255 - new_c*ucres.y - old_c*buffer[y*imgw + x].y)/total;
    buffer[y*imgw + x].z = 255 - (total*255 - new_c*ucres.z - old_c*buffer[y*imgw + x].z)/total;
    buffer[y*imgw + x].w = 255 - (total*255 - new_c*ucres.w - old_c*buffer[y*imgw + x].w)/total;
}
// macros to make indexing shared memory easier
#define SMEM(X, Y) sdata[(Y)*tilew+(X)]
/*
2D convolution using shared memory
- operates on 8-bit RGB data stored in 32-bit int
- assumes kernel radius is less than or equal to block size
- not optimized for performance
_____________
| : : |
|_ _:_____:_ _|
| | | |
| | | |
|_ _|_____|_ _|
r | : : |
|___:_____:___|
r bw r
<----tilew---->
*/
// Post-processing kernel: first accumulates the current frame into the
// motion blur buffer, then applies a disc-shaped blur of radius r with
// highlight boosting, and writes the result to g_odata packed via rgbToInt.
//
// Requires dynamic shared memory for a tilew x (blockDim.y + 2r) uchar4 tile,
// where tilew = blockDim.x + 2r, and r <= min(blockDim.x, blockDim.y).
// NOTE(review): there is no x < imgw / y < imgh guard, so the image
// dimensions presumably must be multiples of the block dimensions -- confirm.
__global__ void
cudaProcess(unsigned int *g_odata, uchar4 * motion_buffer, int imgw, int imgh,
            int tilew, int r, float threshold, float highlight)
{
    extern __shared__ uchar4 sdata[];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bw = blockDim.x;
    int bh = blockDim.y;
    int x = blockIdx.x*bw + tx;
    int y = blockIdx.y*bh + ty;
    // perform motion blur
    // NOTE(review): the halo loads below read motion_buffer values written by
    // neighboring blocks, but __syncthreads() only orders threads within this
    // block -- neighboring tiles may observe pre- or post-blend pixels.
    setPixel(x, y, imgw, motion_buffer);
    __syncthreads();
    // copy tile to shared memory
    // center region
    SMEM(r + tx, r + ty) = getPixel(x, y, imgw, imgh, motion_buffer);
    // borders
    if (threadIdx.x < r)
    {
        // left
        SMEM(tx, r + ty) = getPixel(x - r, y, imgw, imgh, motion_buffer);
        // right
        SMEM(r + bw + tx, r + ty) = getPixel(x + bw, y, imgw, imgh, motion_buffer);
    }
    if (threadIdx.y < r)
    {
        // top
        SMEM(r + tx, ty) = getPixel(x, y - r, imgw, imgh, motion_buffer);
        // bottom
        SMEM(r + tx, r + bh + ty) = getPixel(x, y + bh, imgw, imgh, motion_buffer);
    }
    // load corners
    if ((threadIdx.x < r) && (threadIdx.y < r))
    {
        // tl
        SMEM(tx, ty) = getPixel(x - r, y - r, imgw, imgh, motion_buffer);
        // bl
        SMEM(tx, r + bh + ty) = getPixel(x - r, y + bh, imgw, imgh, motion_buffer);
        // tr
        // NOTE(review): uses x + bh where the other edges use x + bw; this is
        // only correct for square thread blocks -- confirm intent.
        SMEM(r + bw + tx, ty) = getPixel(x + bh, y - r, imgw, imgh, motion_buffer);
        // br
        SMEM(r + bw + tx, r + bh + ty) = getPixel(x + bw, y + bh, imgw, imgh, motion_buffer);
    }
    // wait for loads to complete
    __syncthreads();
    // perform convolution
    float rsum = 0.0f;
    float gsum = 0.0f;
    float bsum = 0.0f;
    float samples = 0.0f;
    for (int dy=-r; dy<=r; dy++)
    {
        for (int dx=-r; dx<=r; dx++)
        {
            uchar4 pixel = SMEM(r+tx+dx, r+ty+dy);
            // only sum pixels within disc-shaped kernel
            float l = dx*dx + dy*dy;
            if (l <= r*r)
            {
                // the local r/g/b channel values shadow the radius r below
                float r = float(pixel.x);
                float g = float(pixel.y);
                float b = float(pixel.z);
                // brighten highlights
                float lum = (r + g + b) / (255*3);
                if (lum > threshold)
                {
                    r *= highlight;
                    g *= highlight;
                    b *= highlight;
                }
                rsum += r;
                gsum += g;
                bsum += b;
                samples += 1.0f;
            }
        }
    }
    // normalize by the number of samples inside the disc
    rsum /= samples;
    gsum /= samples;
    bsum /= samples;
    // ABGR
    g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum);
    //g_odata[y*imgw+x] = rgbToInt(x,y,0);
}
// Host-side launcher: binds the input image array to inTex and runs the
// cudaProcess post-processing kernel on the given launch configuration.
//
// sbytes is the dynamic shared memory size for the pixel tile; tilew is
// unused because the tile width is recomputed from block.x and radius.
// Bug fix: threshold and highlight are now forwarded to the kernel instead
// of the hard-coded 0.8f / 4.0f, which silently ignored these parameters.
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes,
                   hipArray *g_data_array, uchar4* motion_buffer, unsigned int *g_odata,
                   int imgw, int imgh, int tilew,
                   int radius, float threshold, float highlight)
{
    checkCudaErrors(hipBindTextureToArray(inTex, g_data_array));
    struct hipChannelFormatDesc desc;
    checkCudaErrors(hipGetChannelDesc(&desc, g_data_array));
    hipLaunchKernelGGL(( cudaProcess), dim3(grid), dim3(block), sbytes , 0, g_odata, motion_buffer, imgw, imgh,
                                               block.x+(2*radius), radius, threshold, highlight);
}
| 42e87ed7882e80acd09e53f48271c5ae033b7120.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Utilities and system includes
#include <helper_cuda.h>
#ifndef USE_TEXTURE_RGBA8UI
texture<float4, 2, cudaReadModeElementType> inTex;
#else
texture<uchar4, 2, cudaReadModeElementType> inTex;
#endif
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
    // limit from above first, then from below
    float capped = min(b, x);
    return max(a, capped);
}
__device__ int clamp(int x, int a, int b)
{
    // limit from above first, then from below
    int capped = min(b, x);
    return max(a, capped);
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
    // saturate each channel to [0, 255] before truncating to integer
    int ri = int(clamp(r, 0.0f, 255.0f));
    int gi = int(clamp(g, 0.0f, 255.0f));
    int bi = int(clamp(b, 0.0f, 255.0f));
    // pack channels as 0x00BBGGRR
    return (bi << 16) | (gi << 8) | ri;
}
}
// get pixel from 2D image, with clamping to border
__device__ uchar4 getPixel(int x, int y, int imgw, int imgh, uchar4* buffer)
{
    // clamp the coordinates so reads outside the image repeat the border pixel
    int cy = clamp(y, 0, imgh - 1);
    int cx = clamp(x, 0, imgw - 1);
    return buffer[cy * imgw + cx];
}
// blend the texture sample at (x, y) into the motion blur accumulation buffer
// Blends the current texture sample at (x, y) into the motion blur
// accumulation buffer as a weighted running average (history:new = 30:1).
__device__ void setPixel(int x, int y, int imgw, uchar4* buffer)
{
#ifndef USE_TEXTURE_RGBA8UI
    // float texture path: scale the normalized sample back to 8-bit channels
    float4 res = tex2D(inTex, x, y);
    uchar4 ucres = make_uchar4(res.x*255.0f, res.y*255.0f, res.z*255.0f, res.w*255.0f);
#else
    uchar4 ucres = tex2D(inTex, x, y);
#endif
    // weights of the running average: 30 parts history, 1 part new sample
    int old_c = 30;
    int new_c = 1;
    int total = old_c + new_c;
    // per channel: (new_c*sample + old_c*history)/total, written through the
    // 255-complement so the integer division rounds up instead of down
    buffer[y*imgw + x].x = 255 - (total*255 - new_c*ucres.x - old_c*buffer[y*imgw + x].x)/total;
    buffer[y*imgw + x].y = 255 - (total*255 - new_c*ucres.y - old_c*buffer[y*imgw + x].y)/total;
    buffer[y*imgw + x].z = 255 - (total*255 - new_c*ucres.z - old_c*buffer[y*imgw + x].z)/total;
    buffer[y*imgw + x].w = 255 - (total*255 - new_c*ucres.w - old_c*buffer[y*imgw + x].w)/total;
}
// macros to make indexing shared memory easier
#define SMEM(X, Y) sdata[(Y)*tilew+(X)]
/*
2D convolution using shared memory
- operates on 8-bit RGB data stored in 32-bit int
- assumes kernel radius is less than or equal to block size
- not optimized for performance
_____________
| : : |
|_ _:_____:_ _|
| | | |
| | | |
|_ _|_____|_ _|
r | : : |
|___:_____:___|
r bw r
<----tilew---->
*/
__global__ void
cudaProcess(unsigned int *g_odata, uchar4 * motion_buffer, int imgw, int imgh,
int tilew, int r, float threshold, float highlight)
{
extern __shared__ uchar4 sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
// perform motion blur
setPixel(x, y, imgw, motion_buffer);
__syncthreads();
// copy tile to shared memory
// center region
SMEM(r + tx, r + ty) = getPixel(x, y, imgw, imgh, motion_buffer);
// borders
if (threadIdx.x < r)
{
// left
SMEM(tx, r + ty) = getPixel(x - r, y, imgw, imgh, motion_buffer);
// right
SMEM(r + bw + tx, r + ty) = getPixel(x + bw, y, imgw, imgh, motion_buffer);
}
if (threadIdx.y < r)
{
// top
SMEM(r + tx, ty) = getPixel(x, y - r, imgw, imgh, motion_buffer);
// bottom
SMEM(r + tx, r + bh + ty) = getPixel(x, y + bh, imgw, imgh, motion_buffer);
}
// load corners
if ((threadIdx.x < r) && (threadIdx.y < r))
{
// tl
SMEM(tx, ty) = getPixel(x - r, y - r, imgw, imgh, motion_buffer);
// bl
SMEM(tx, r + bh + ty) = getPixel(x - r, y + bh, imgw, imgh, motion_buffer);
// tr
SMEM(r + bw + tx, ty) = getPixel(x + bh, y - r, imgw, imgh, motion_buffer);
// br
SMEM(r + bw + tx, r + bh + ty) = getPixel(x + bw, y + bh, imgw, imgh, motion_buffer);
}
// wait for loads to complete
__syncthreads();
// perform convolution
float rsum = 0.0f;
float gsum = 0.0f;
float bsum = 0.0f;
float samples = 0.0f;
for (int dy=-r; dy<=r; dy++)
{
for (int dx=-r; dx<=r; dx++)
{
uchar4 pixel = SMEM(r+tx+dx, r+ty+dy);
// only sum pixels within disc-shaped kernel
float l = dx*dx + dy*dy;
if (l <= r*r)
{
float r = float(pixel.x);
float g = float(pixel.y);
float b = float(pixel.z);
// brighten highlights
float lum = (r + g + b) / (255*3);
if (lum > threshold)
{
r *= highlight;
g *= highlight;
b *= highlight;
}
rsum += r;
gsum += g;
bsum += b;
samples += 1.0f;
}
}
}
rsum /= samples;
gsum /= samples;
bsum /= samples;
// ABGR
g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum);
//g_odata[y*imgw+x] = rgbToInt(x,y,0);
}
// Host-side launcher: binds the input image array to inTex and runs the
// cudaProcess post-processing kernel on the given launch configuration.
//
// sbytes is the dynamic shared memory size for the pixel tile; tilew is
// unused because the tile width is recomputed from block.x and radius.
// Bug fix: threshold and highlight are now forwarded to the kernel instead
// of the hard-coded 0.8f / 4.0f, which silently ignored these parameters.
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes,
                   cudaArray *g_data_array, uchar4* motion_buffer, unsigned int *g_odata,
                   int imgw, int imgh, int tilew,
                   int radius, float threshold, float highlight)
{
    checkCudaErrors(cudaBindTextureToArray(inTex, g_data_array));
    struct cudaChannelFormatDesc desc;
    checkCudaErrors(cudaGetChannelDesc(&desc, g_data_array));
    cudaProcess<<< grid, block, sbytes >>>(g_odata, motion_buffer, imgw, imgh,
                                           block.x+(2*radius), radius, threshold, highlight);
}
|
26e1b9f652801e4358b1edb77375555505ca5e38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgemvmdot.cu normal z -> d, Tue Feb 9 16:05:42 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_d
// initialize arrays with zero
// set every entry of the n x k array d (leading dimension n) to zero;
// one thread clears a full row across all k columns
__global__ void
magma_dgpumemzero(
    double * d,
    int n,
    int k )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n )
        return;
    for ( int col = 0; col < k; col++ ) {
        d[ row + col*n ] = MAGMA_D_MAKE( 0.0, 0.0 );
    }
}
// dot product
// One partial result of <v,r> per thread block: elementwise products are
// reduced in shared memory and the block total is written to vtmp[blockIdx.x].
// Assumes blockDim.x == 256 (the 128-offset step requires it) and
// blockDim.x*sizeof(double) bytes of dynamic shared memory; Gs is unused here.
__global__ void
magma_ddot_kernel(
    int Gs,
    int n,
    double * v,
    double * r,
    double * vtmp)
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    // load the elementwise product, padding with zero beyond n
    temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
    __syncthreads();
    // shared-memory tree reduction: 256 -> 128 -> 64 partials
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // final 32 elements: volatile accesses instead of barriers -- relies on
    // the legacy warp-synchronous execution assumption (pre-Volta idiom)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    // thread 0 publishes the block's partial sum
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// dot product for multiple vectors
__global__ void
magma_dblockdot_kernel(
int Gs,
int n,
int k,
double * v,
double * r,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
__global__ void
magma_dblockreduce_kernel(
int Gs,
int n,
int k,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
__global__ void
magma_dreduce_kernel_fast( int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
__global__ void
magma_dblockreduce_kernel_fast(
int Gs,
int n,
int k,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmdotc(
    magma_int_t n,
    magma_int_t k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    // shared memory: one double per thread and per vector
    int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs
    // ping-pong buffers for the iterative block reduction
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;
    // first pass: per-block partial dot products into d1
    if (k>1) {
        hipLaunchKernelGGL(( magma_dblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, k, v, r, d1 );
    }
    else {
        hipLaunchKernelGGL(( magma_ddot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v, r, d1 );
    }
    /*
    // not necessary to zero GPU mem
    magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
    magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
    //magmablas_dlaset( MagmaFull, n, k, d1, n, UNKNOWN );
    //magmablas_dlaset( MagmaFull, n, k, d2, n, UNKNOWN );
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
            ( Gs.x, n, k, aux1, aux2 );
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else { aux2 = d1; aux1 = d2; }
    }
    for( int j=0; j<k; j++) {
        magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
    }
    */
    // iteratively reduce the per-block partials, swapping aux1/aux2 each pass;
    // the "fast" kernels process two elements per thread, hence the halved
    // grid/block sizes (Gs_next.x is bumped to 2 so the halved grid is >= 1)
    if ( k>1) {
        while( Gs.x > 1 ) {
            Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            hipLaunchKernelGGL(( magma_dblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
                Gs.x, n, k, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else { aux2 = d1; aux1 = d2; }
        }
    }
    else {
        while( Gs.x > 1 ) {
            Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            hipLaunchKernelGGL(( magma_dreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
                Gs.x, n, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else { aux2 = d1; aux1 = d2; }
        }
    }
    // gather the k final sums (one per row of the workspace) into skp
    magma_dcopyvector_async( k, aux1, n, skp, 1, queue );
    return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dgemvmdot(
    magma_int_t n,
    magma_int_t k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int rows_left = k;
    int offset = 0;
    int chunk_size = 4;
    // process in chunks of chunk_size (currently 4) vectors so the merged
    // dot product's shared-memory usage stays bounded - the chunk size has
    // to be adapted to hardware and precision
    while( rows_left > (chunk_size) ) {
        magma_dmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
        offset = offset + chunk_size;
        rows_left = rows_left-chunk_size;
    }
    // process rest (between 1 and chunk_size vectors remain here)
    magma_dmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
    return MAGMA_SUCCESS;
}
| 26e1b9f652801e4358b1edb77375555505ca5e38.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgemvmdot.cu normal z -> d, Tue Feb 9 16:05:42 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_d
// Zero-fill k column vectors of length n stored contiguously in d
// (column j occupies d[j*n .. j*n+n-1]).  One thread clears one row
// across all k columns; out-of-range threads exit early.
__global__ void
magma_dgpumemzero(
    double * d,
    int n,
    int k )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n )
        return;
    for ( int col = 0; col < k; col++ )
        d[ row + col*n ] = MAGMA_D_MAKE( 0.0, 0.0 );
}
// Single-vector dot product: block b accumulates a partial sum of
// v[i]*r[i] in shared memory and writes it to vtmp[b].
// Requires blockDim.x == 256 (the reduction starts at stride 128) and
// blockDim.x doubles of dynamic shared memory; both are supplied by the
// host driver magma_dmdotc below.
__global__ void
magma_ddot_kernel(
int Gs,
int n,
double * v,
double * r,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// elementwise product, zero-padded past n
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
__syncthreads();
// shared-memory tree reduction: 256 -> 128 -> 64 partials
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// Final 64 -> 1 stage, specialized per precision.  The volatile variants
// are the classic warp-synchronous idiom (no barriers inside one warp).
// NOTE(review): this relies on implicit warp lockstep, which is not
// guaranteed under Volta+ independent thread scheduling - confirm target
// architectures or add __syncwarp().
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial sum
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// Multi-vector dot product: simultaneously forms partial sums of
// v_j[i]*r[i] for the k columns of v.  Block b writes k partials to
// vtmp[b + j*n], j = 0..k-1.  Requires k*blockDim.x doubles of dynamic
// shared memory and blockDim.x == 256 (reduction starts at stride 128);
// both are supplied by the host driver magma_dmdotc below.
__global__ void
magma_dblockdot_kernel(
int Gs,
int n,
int k,
double * v,
double * r,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i): shared segment j holds the products for column j
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction over each of the k shared-memory segments
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
// warp-synchronous tail per precision (volatile idiom; see NOTE in
// magma_ddot_kernel about Volta+ scheduling)
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 publishes the k partial sums of this block
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Block reduction for multiple vectors: folds the per-block partials in
// vtmp (k columns, leading dimension n) down to one value per launch
// block, written to vtmp2[blockIdx.x + j*n].  Same shared-memory layout
// and blockDim.x == 256 requirement as magma_dblockdot_kernel.
__global__ void
magma_dblockreduce_kernel(
int Gs,
int n,
int k,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// stage the k partials of element i into shared memory (zero-padded)
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction over each of the k segments
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
// warp-synchronous tail per precision (volatile idiom)
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// publish one reduced value per column for this block
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Accelerated reduction for one vector: each thread strides over vtmp
// accumulating two elements (i and i+blockSize) per grid step, then the
// block reduces its 128 partials to vtmp2[blockIdx.x].
// Launched by magma_dmdotc with 128 threads (Bs.x/2) and 128 doubles of
// dynamic shared memory; blockSize below matches that launch.
__global__ void
magma_dreduce_kernel_fast( int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
// grid-stride accumulation: two loads per step, guarded at the tail
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
// 128 -> 64, then warp-synchronous tail per precision
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// one reduced value per block
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// Accelerated block reduction for multiple vectors: like
// magma_dreduce_kernel_fast but applied to k columns of vtmp (leading
// dimension n); reduced values go to vtmp2[blockIdx.x + j*n].
// Launched by magma_dmdotc with 128 threads and k*128 doubles of dynamic
// shared memory (segment j of the shared buffer serves column j).
__global__ void
magma_dblockreduce_kernel_fast(
int Gs,
int n,
int k,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
// grid-stride accumulation per column, two loads per step
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
// 128 -> 64 per column, then warp-synchronous tail per precision
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// publish one reduced value per column for this block
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
// Host driver: computes skp[j] = <v_j, r> for j = 0..k-1.
// Stage 1 writes Gs.x per-block partials (per column) into d1; stage 2
// repeatedly halves the partial count with the "fast" reduction kernels,
// ping-ponging between d1 and d2, until one value per column remains.
// The final k values sit aux1[0], aux1[n], ... and are copied (stride n)
// into skp asynchronously on the given queue.
extern "C" magma_int_t
magma_dmdotc(
magma_int_t n,
magma_int_t k,
magmaDouble_ptr v,
magmaDouble_ptr r,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs
// aux1/aux2 ping-pong between the two workspaces; b tracks which is "in"
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
if (k>1) {
magma_dblockdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, k, v, r, d1 );
}
else {
magma_ddot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
//magmablas_dlaset( MagmaFull, n, k, d1, n, UNKNOWN );
//magmablas_dlaset( MagmaFull, n, k, d2, n, UNKNOWN );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
}
*/
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
// force an even grid so the /2 launch below has at least one block
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
// gather the k column results (stride n) into the contiguous skp vector
magma_dcopyvector_async( k, aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
/**
 * Chunked multi-dot-product: computes skp[i] = <v_i, r> for i = 0..k-1,
 * processing the k column vectors of v in groups of chunk_size so that
 * each group's working set fits into cache.  Each group is delegated to
 * magma_dmdotc; results land in skp exactly as a direct call would.
 */
extern "C" magma_int_t
magma_dgemvmdot(
    magma_int_t n,
    magma_int_t k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    const int chunk_size = 4;   // group size - has to be adapted to hardware and precision
    int offset = 0;
    // full groups of chunk_size vectors
    while ( k - offset > chunk_size ) {
        magma_dmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
        offset += chunk_size;
    }
    // remaining (at most chunk_size) vectors
    magma_dmdotc( n, k - offset, v+offset*n, r, d1, d2, skp+offset, queue );
    return MAGMA_SUCCESS;
}
|
7b16fc3e9f518fe6b134a6122706e85250dad1b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaSRectifier_backPropagate_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated microbenchmark: sweeps 7 matrix sizes x 20 block shapes,
// timing 1000 launches of cudaSRectifier_backPropagate_kernel per config
// and printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// NOTE(review): hipMalloc is given XSIZE*YSIZE *bytes* although the
// buffers are indexed as float arrays, and neither buffer is ever freed;
// also the timed loop never synchronizes, so only launch overhead is
// measured - confirm whether this generated harness should be fixed.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *dx = NULL;
hipMalloc(&dx, XSIZE*YSIZE);
unsigned int size = 1;
float leakSlope = 1;
float clipping = 1;
// round the launch area up to a multiple of the block shape
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation; then one synced launch
// plus ten warmup launches before the timed section
hipFree(0);hipLaunchKernelGGL((
cudaSRectifier_backPropagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,dx,size,leakSlope,clipping);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cudaSRectifier_backPropagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,dx,size,leakSlope,clipping);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cudaSRectifier_backPropagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,dx,size,leakSlope,clipping);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7b16fc3e9f518fe6b134a6122706e85250dad1b9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaSRectifier_backPropagate_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Microbenchmark: sweeps matrix sizes x block shapes, timing 1000 launches
// of cudaSRectifier_backPropagate_kernel per configuration and printing
// "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per line.
// argv[1]: number of entries of matrices_ to benchmark.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *x = NULL;
            // FIX: allocate XSIZE*YSIZE floats, not bytes - the kernel
            // indexes these buffers as float arrays.
            cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
            float *dx = NULL;
            cudaMalloc(&dx, XSIZE*YSIZE*sizeof(float));
            unsigned int size = 1;
            float leakSlope = 1;
            float clipping = 1;
            // round the launch area up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);   // force lazy context creation before timing
            // one synchronized launch plus ten warmup launches
            cudaSRectifier_backPropagate_kernel<<<gridBlock,threadBlock>>>(x,dx,size,leakSlope,clipping);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cudaSRectifier_backPropagate_kernel<<<gridBlock,threadBlock>>>(x,dx,size,leakSlope,clipping);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cudaSRectifier_backPropagate_kernel<<<gridBlock,threadBlock>>>(x,dx,size,leakSlope,clipping);
            }
            // FIX: wait for the queued kernels so the interval measures
            // execution time, not just launch overhead.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: release the device buffers each iteration (the original
            // leaked both allocations across all configurations).
            cudaFree(x);
            cudaFree(dx);
        }
    }
}
13c256e858778966a552b6e725f3a3e52fd3dc5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include "cv.h"
#include "highgui.h"
#include "elas.h"
#include <vector>
#include "triangle.h"
#include "matrix.h"
#include <stdlib.h>
using namespace std;
/***
* 1. hipHostMalloc and hipHostFree wrap
* 2. create Descriptor of two img
* 3. compute support point
* 4. convert dis to cloud
* 5. cuda_computeD
* 6. leftRightConsistencyCheck
* */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__);}
// Abort-on-error helper behind the gpuErrchk macro: reports the HIP error
// string together with the call site and, unless abort is false, exits the
// process with the error code.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
    if (code == hipSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Fixed image geometry used by all kernels below (QVGA input).
#define WIDTH 320
#define HEIGH 240
#define GRID_SIZE 20
// Preset selector for ELAS parameter profiles.
enum setting { ROBOTICS, MIDDLEBURY };
// parameter set shared by the host-side matching code
Elas::parameters param;
// Parameters of a disparity plane (ELAS fits one per Delaunay triangle).
// NOTE(review): the field semantics are not visible in this chunk -
// presumably t1a/t1b/t1c parameterize d(u,v) for the left image and t2a
// belongs to the right-image plane; confirm against the matching code.
struct plane {
float t1a, t1b, t1c;
float t2a;
};
// Left/right disparity cross-check kernel (defined elsewhere in this file).
__global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g);
/**
 * 1. hipHostMalloc and hipHostFree wrap
 *
 * */
// Counters pairing pinned-host allocations with frees (leak diagnostics,
// reported by allocFreeCount()).
static int count_alloc = 0;
static int count_free = 0;
// Free page-locked host memory allocated via HostMal, bump the free
// counter, and report (without aborting) any pending HIP error.
void cudaFreeHost_cpuaa(void *p)
{
    ++count_free;
    hipHostFree(p);
    const hipError_t status = hipGetLastError();
    if (status != hipSuccess)
        printf("cuda error: %s\n", hipGetErrorString(status));
}
// Enable mapping of page-locked host allocations into the device address
// space (zero-copy) for subsequent HostMal calls.
void SetDeviceMap() { hipSetDeviceFlags(hipDeviceMapHost); }
/**
 * Allocate `size` bytes of page-locked host memory, storing the host
 * pointer through *p, and return the matching device pointer (NULL on
 * failure).  Increments count_alloc; errors are printed to stdout.
 * NOTE(review): hipHostGetDevicePointer is used without the
 * hipHostMallocMapped flag - this appears to rely on SetDeviceMap()
 * having set hipDeviceMapHost beforehand; confirm.
 */
void* HostMal(void **p, long size)
{
    count_alloc++;
    // FIX: initialize so a failed mapping returns NULL instead of an
    // indeterminate pointer.
    void *p_g = NULL;
    hipHostMalloc((void**)p, size, hipHostMallocDefault );
    hipError_t err = hipGetLastError();
    if(0 != err) {
        printf("cuda error: %s\n", hipGetErrorString(err));
        // FIX: do not query a device pointer for a failed allocation.
        return NULL;
    }
    hipHostGetDevicePointer(&p_g, *p, 0);
    err = hipGetLastError();
    if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err));
    return p_g;
}
// Print how many pinned-host allocations and frees have been issued so
// far; a mismatch indicates a host-memory leak.
void allocFreeCount()
{
    printf("count_alloc = %d\n", count_alloc);
    printf("count_free= %d\n", count_free);
}
/**
* 1. over: hipHostMalloc and hipHostFree wrap
* */
//dim3 threads(320 - 6 , 1);
//dim3 grid( 1, 240 -6 ); => (0, 233)
/**
* 2. create Descriptor of two img
* */
// Build the 16-byte ELAS feature descriptor for each interior pixel from
// horizontal (I_du) and vertical (I_dv) Sobel responses.  Launched with
// one block per image row: threads(WIDTH-6,1), grid(1,HEIGH-6), so every
// thread handles one pixel (x,y) with x in [3,316], y in [3,236].
// The descriptor samples 12 du values (rows y-2..y+2, the center tap
// taken twice) and 4 dv values (rows y-1..y+1).
// NOTE(review): the shared tiles are only written for columns 3..316 but
// read at x-2/x+2 (columns 1..318), so columns 1,2,317,318 are consumed
// uninitialized - border descriptors may contain garbage; confirm whether
// those pixels are ever used downstream.
__global__ \
void createDesc_gpu_kernel(uint8_t* I_desc, uint8_t* I_du, uint8_t* I_dv)
{
int u = blockDim.x * blockIdx.x + threadIdx.x;
int v = blockDim.y * blockIdx.y + threadIdx.y;
int x = u + 3;
int y = v + 3;
// stage 5 du rows and 3 dv rows centered on y into shared memory
__shared__ uint8_t I_du_share[320 * 5];
__shared__ uint8_t I_dv_share[320 * 3];
uint8_t *I_desc_curr;
for(int i = 0; i < 5; i++){
*(I_du_share + x + i * 320) = *(I_du + x + (y-2 + i) * 320);
}
for(int i = 0; i < 3; i++){
*(I_dv_share + x + i * 320) = *(I_dv + x + (y-1 + i) * 320);
}
__syncthreads();
// 16 consecutive bytes per pixel
I_desc_curr = I_desc + (y* WIDTH + x) * 16;
*(I_desc_curr++) = *(I_du_share + (0 * WIDTH + x + 0) );
*(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x - 2) );
*(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 0) );
*(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 2) );
*(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x - 1) );
*(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) );
// center tap deliberately duplicated (double weight), as in ELAS
*(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) );
*(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 2) );
*(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x - 2) );
*(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 0) );
*(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 2) );
*(I_desc_curr++) = *(I_du_share + (4 * WIDTH + x + 0) );
*(I_desc_curr++) = *(I_dv_share + (0 * WIDTH + x + 0) );
*(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x + 1) );
*(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x - 1) );
*(I_desc_curr++) = *(I_dv_share + (2 * WIDTH + x + 0) );
}
// Launch the descriptor kernel over the interior (WIDTH-6)x(HEIGH-6)
// region and wait for completion.  Returns 0 on success; launch/sync
// errors are reported by gpuErrchk, which aborts the process.
int __createDesc_gpu(uint8_t* I_desc, uint8_t* I_du_g, uint8_t* I_dv_g )
{
    dim3 threads(WIDTH - 6 , 1);   // one thread per interior column
    dim3 grid( 1, HEIGH - 6 );     // one block per interior row
    cout<< "create Desc"<<endl;
    hipLaunchKernelGGL(( createDesc_gpu_kernel), dim3(grid), dim3(threads), 0 , 0, I_desc, I_du_g, I_dv_g );
    cout<< "create Desc1"<<endl;
    gpuErrchk(hipDeviceSynchronize());
    cout<< "create Desc2"<<endl;
    // FIX: the function is declared int but previously fell off the end
    // without a return statement (undefined behavior in C++).
    return 0;
}
/**
* 2. over: create Descriptor of two img
* */
/**
* 3. compute support point
* */
// Row-major linear index of pixel (u,v) in an image of the given width.
__device__ \
uint32_t getAddressOffsetImage1(const int32_t& u, const int32_t& v, const int32_t& width)
{
    return u + v * width;
}
// Matching cost of two 16-byte descriptors starting at dst1+offset and
// dst2+offset: the sum of absolute byte differences (SAD).
__device__ \
unsigned int computeMatchEnergy1(unsigned char* dst1, unsigned char* dst2, int offset)
{
    unsigned int sad = 0;
#pragma unroll
    for (int i = 0; i < 16; i++)
        sad += abs(*(dst1 + offset + i) - *(dst2 + offset + i));
    return sad;
}
// Linear index into a (rows x width x disp_num) grid for cell (x,y,d).
__device__ \
uint32_t getAddressOffsetGrid1(const int32_t& x, const int32_t& y, \
const int32_t& d, const int32_t& width, const int32_t& disp_num)
{
    uint32_t cell = y * width + x;
    return cell * disp_num + d;
}
// Evaluate the descriptor matching cost for disparity d and fold it into
// the running minimum (min_val, min_d); val returns the raw cost.
__device__ \
void updatePosteriorMinimumNew(unsigned char* dst1, unsigned char* dst2, const int32_t &d, int32_t &val, int32_t &min_val, int32_t &min_d)
{
    val = computeMatchEnergy1(dst1, dst2, 0);
    if (val >= min_val)
        return;
    min_val = val;
    min_d = d;
}
// Like updatePosteriorMinimumNew, but the cost is biased by the prior
// weight w before being compared against the running minimum.
__device__ \
void updatePosteriorMinimumNew1(unsigned char* dst1, unsigned char* dst2, const int32_t &d, const int8_t w, int32_t &val, int32_t &min_val, int32_t &min_d)
{
    val = computeMatchEnergy1(dst1, dst2, 0) + w;
    if (val >= min_val)
        return;
    min_val = val;
    min_d = d;
}
// Ceiling of a/b, used to size launch grids.  Implemented with a
// truncating division plus remainder test (matches original semantics,
// including for negative operands).
int iDivUp(int a, int b)
{
    int q = a / b;
    return (a % b != 0) ? q + 1 : q;
}
// Four-window matching cost between descriptor rows: sums the byte-wise
// SAD of the descriptors two columns left (-32 bytes) and two columns
// right (+32 bytes) of u/u_wrap, in two descriptor rows (dst*_1, dst*_2).
// (u << 4) is the 16-byte descriptor offset of column u.
__device__ \
unsigned int computeMatchEnergy1_new(unsigned char* dst1_1, unsigned char* dst1_2, unsigned char* dst2_1, unsigned char* dst2_2, int32_t u, int32_t u_wrap) {
unsigned int r0, r1, r2, r3;
r0 = 0;
r1 = 0;
r2 = 0;
r3 = 0;
#pragma unroll
for (int i = 0; i < 16; i++) {
r0 += abs(dst2_1[(u_wrap << 4) - 32 + i] - dst1_1[(u << 4) - 32 + i]);
r1 += abs(dst2_1[(u_wrap << 4) + 32 + i] - dst1_1[(u << 4) + 32 + i]);
r2 += abs(dst2_2[(u_wrap << 4) - 32 + i] - dst1_2[(u << 4) - 32 + i]);
r3 += abs(dst2_2[(u_wrap << 4) + 32 + i] - dst1_2[(u << 4) + 32 + i]);
}
return r0 + r1 + r2 + r3;
}
#define D_candidate_stepsize 5
#define INCON_THRESHOLD 5
#define INCON_MIN_SUPPORT 5
#define INCON_WINDOW_SIZE 5
#define SUPPORT_TEXTURE 10
#define DISP_MIN 0
#define DISP_MAX 63
#define SUPPORT_THRESHOLD 0.85
#define U_STEP 2
#define V_STEP 2
#define WINDOW_SIZE 3
#define MIN_1_E 32767
#define MIN_1_D -1
#define MIN_2_E 32767
#define MIN_2_D -1
#define DESC_OFFSET_1 (-16 * U_STEP)
#define DESC_OFFSET_2 (+16 * U_STEP)
#define DESC_OFFSET_3 (-16 * U_STEP)
#define DESC_OFFSET_4 (+16 * U_STEP)
#define BLOCKX 60
#define BLOCKY 1
#define GRIDX 1
#define GRIDY 46
//#define GRIDY 2
//dim3 threads(60, 1);
//dim3 grid(1, 46);
__constant__ uint32_t oneLine = WIDTH * 16;
// Per-byte SAD between the four descriptor windows at columns u+-2 (two
// shared-memory descriptor rows, second row at +oneLine); each of the 16
// threads handles one descriptor byte and writes its sum to sumResult[x].
// FIX: the original indices were written "(u - 2) << 4 + x", which parses
// as "(u - 2) << (4 + x)" because '+' binds tighter than '<<' - for the
// +oneLine terms the shift amount even exceeds the type width (undefined
// behavior).  The shifts are now parenthesized so the intended
// "(u +- 2)*16 + x [+ oneLine]" byte offsets are computed.
__global__ void compEner_gpu(uint8_t* I1_desc_shared, uint8_t* I2_desc_shared, int u, int u_wrap, uint32_t* sumResult)
{
    int x = threadIdx.x; // x = (0,15): one thread per descriptor byte
    int32_t sum = 0;
    sum = abs(I1_desc_shared[((u - 2) << 4) + x ] - I2_desc_shared[((u_wrap - 2) << 4) + x]);
    sum += abs(I1_desc_shared[((u + 2) << 4) + x ] - I2_desc_shared[((u_wrap + 2) << 4) + x]);
    sum += abs(I1_desc_shared[((u + 2) << 4) + x + oneLine] - I2_desc_shared[((u_wrap + 2) << 4) + x + oneLine]);
    sum += abs(I1_desc_shared[((u - 2) << 4) + x + oneLine] - I2_desc_shared[((u_wrap - 2) << 4) + x + oneLine]);
    sumResult[x] = sum;
}
// Support-point matching: for each candidate (every 5th pixel), find the
// best left->right disparity d1 by SAD over descriptors, verify it with a
// right->left match d2, and record d1 in D_can only if |d2-d1| <= 2 and
// the uniqueness ratio (best < 0.85 * second-best) holds on both passes.
// Launch expectation: blockDim=(60,1), grid=(1,*) so x==threadIdx.x is a
// candidate column index and y a candidate row index.
// NOTE(review): only the "result4" window (+2*U_STEP offset) contributes
// to the cost - the other three windows are commented out; confirm this
// partial-cost variant is intentional.
__global__ void sptMathKernel(int32_t D_can_width, int32_t D_can_height, int8_t* D_can, uint8_t* desc1, uint8_t* desc2)
{
int32_t u_wrap;
int disp_max_valid;
int result1 = 0, result2 = 0, result3 = 0, result4 = 0;
int32_t line_offset;
uint8_t *I1_line_addr, *I2_line_addr, *I1_block_addr, *I2_block_addr, *I_line_addr_tmp;
uint8_t *I1_block_addr_1, *I1_block_addr_2, *I2_block_addr_1, *I2_block_addr_2;
int32_t sum = 0;
int16_t min_1_E;
int16_t min_1_d;
int16_t min_2_E;
int16_t min_2_d;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int u, v, d1 = -1 , d2 = -1;
// candidate pixel coordinates (stepsize 5, with border margins)
u = (x + 3) * D_candidate_stepsize; //5
v = (y + 1) * D_candidate_stepsize;
line_offset = 16 * WIDTH*v;
I1_line_addr = desc1 + line_offset;
I2_line_addr = desc2 + line_offset;
// stage descriptor rows v-2*5/16?  no: rows at +-2*oneLine (i.e. v-2 and
// v+2 descriptor lines) for both images; each half holds one full line
__shared__ uint8_t I1_desc_shared[320 * 16 * 2];
__shared__ uint8_t I2_desc_shared[320 * 16 * 2];
// NOTE(review): with BLOCKX=60 and 85 iterations this fills 5100 of the
// 5120 bytes per line; the last 20 bytes are never read for valid u, but
// confirm if launch parameters ever change.
for(int i = 0; i < 85; i++){
I1_desc_shared[x + i * BLOCKX ] = *(I1_line_addr + x + i * BLOCKX - 2 * oneLine);
I1_desc_shared[x + i * BLOCKX + oneLine] = *(I1_line_addr + x + i * BLOCKX + 2 * oneLine);
I2_desc_shared[x + i * BLOCKX ] = *(I2_line_addr + x + i * BLOCKX - 2 * oneLine);
I2_desc_shared[x + i * BLOCKX + oneLine] = *(I2_line_addr + x + i * BLOCKX + 2 * oneLine);
}
__syncthreads();
// texture check: reject candidates whose left descriptor is too flat
for (int32_t i=0; i<16; i++)
sum += abs((int32_t)(*(I1_line_addr + 16 * u +i))-128);
if (sum<10){
d1 = -1;
return;
}
I1_block_addr_1 = I1_desc_shared + 16 * u ;
I1_block_addr_2 = I1_desc_shared + 16 * u + oneLine ;
// left->right pass: scan disparities, tracking best and second best
disp_max_valid = min(63, u - 5);
min_1_E = MIN_1_E;
min_1_d = MIN_1_D;
min_2_E = MIN_2_E;
min_2_d = MIN_2_D;
for (int16_t d = 0; d <= disp_max_valid; d++) {
u_wrap = u - d;
I2_block_addr_1 = I2_desc_shared + 16 * u_wrap;
I2_block_addr_2 = I2_desc_shared + 16 * u_wrap + oneLine;
// result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1);
// result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2);
// result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3);
result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4);
// sum = result1 + result2 + result3 + result4;
sum = result4;
if (sum<min_1_E) {
min_2_E = min_1_E;
min_2_d = min_1_d;
min_1_E = sum;
min_1_d = d;
}
else if (sum<min_2_E) {
min_2_E = sum;
min_2_d = d;
}
}
// uniqueness ratio test on the left->right pass
if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E)
d1 = min_1_d;
sum = 0;
if (d1 >= 0){
// right->left verification pass starting from the matched column
min_1_E = MIN_1_E;
min_1_d = MIN_1_D;
min_2_E = MIN_2_E;
min_2_d = MIN_2_D;
u = u - d1;
disp_max_valid = min(63, WIDTH - u - 5);
I2_block_addr_1 = I2_desc_shared + 16 * u;
I2_block_addr_2 = I2_desc_shared + 16 * u + 320 * 16;
sum = 0;
// texture check on the right image
// NOTE(review): unlike the first pass this reads I2_line_addr + i
// without the 16*u offset, i.e. column 0's descriptor - looks like a
// missing "16 * u"; confirm against the CPU reference.
#pragma unroll
for (int32_t i=0; i<16; i++)
sum += abs((int32_t)(*(I2_line_addr+i))-128);
if (sum<10){
return;
}
sum = 0;
for(int16_t d = 0; d <= disp_max_valid; d++){
u_wrap = u + d;
I1_block_addr_1 = I1_desc_shared + 16 * u_wrap;
I1_block_addr_2 = I1_desc_shared + 16 * u_wrap + 320 * 16;
// result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1);
// result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2);
// result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3);
result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4);
// sum = result1 + result2 + result3 + result4;
sum = result4;
// sum = computeMatchEnergy1_new(I2_desc_shared, I2_desc_shared + oneLine, I1_desc_shared, I1_desc_shared + oneLine, u, u_wrap);
if (sum<min_1_E) {
min_2_E = min_1_E;
min_2_d = min_1_d;
min_1_E = sum;
min_1_d = d;
}
else if (sum<min_2_E) {
min_2_E = sum;
min_2_d = d;
}
}
if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E)
d2 = min_1_d;
// accept only if both directions agree within 2 pixels
if( d2 >= 0 && abs(d2 - d1) <= 2 )
D_can[x + y * D_can_width] = d1;
}
}
// Extrapolate support to the four image corners: each corner inherits the
// disparity of its nearest (squared-Euclidean) existing support point.
// The two right-hand corners are additionally mirrored into the right
// image (u shifted by d).  All six points are appended to p_support.
void addCornerSupportPoints(vector<Elas::support_pt> &p_support, int32_t width, int32_t height) {
    // corner candidates, disparity still unknown
    vector<Elas::support_pt> corners;
    corners.push_back(Elas::support_pt(0, 0, 0));
    corners.push_back(Elas::support_pt(0, height - 1, 0));
    corners.push_back(Elas::support_pt(width - 1, 0, 0));
    corners.push_back(Elas::support_pt(width - 1, height - 1, 0));
    // copy the disparity of the closest existing support point
    for (int32_t i = 0; i < (int32_t)corners.size(); i++) {
        int32_t best_dist = 10000000;
        for (int32_t j = 0; j < (int32_t)p_support.size(); j++) {
            int32_t du = corners[i].u - p_support[j].u;
            int32_t dv = corners[i].v - p_support[j].v;
            int32_t dist = du*du + dv*dv;
            if (dist < best_dist) {
                best_dist = dist;
                corners[i].d = p_support[j].d;
            }
        }
    }
    // mirror the two right-hand corners into the right image
    corners.push_back(Elas::support_pt(corners[2].u + corners[2].d, corners[2].v, corners[2].d));
    corners.push_back(Elas::support_pt(corners[3].u + corners[3].d, corners[3].v, corners[3].d));
    // append everything to the support list
    for (int32_t i = 0; i < (int32_t)corners.size(); i++)
        p_support.push_back(corners[i]);
}
// Invalidate support-point candidates that disagree with their
// neighborhood: a candidate at (x,y) survives only if at least
// INCON_MIN_SUPPORT candidates inside a (2*INCON_WINDOW_SIZE+1)^2 window
// have a disparity within INCON_THRESHOLD of it; otherwise it is set to
// -1.  One thread per candidate-grid cell; out-of-range threads exit.
__global__ void removeInconsistentSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int u, v;
if (x < D_can_width && y < D_can_height) {
int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width));
if (d_can >= 0) {
// count neighbors with similar disparity
int32_t support = 0;
for (int32_t u_can_2 = x - INCON_WINDOW_SIZE; u_can_2 <= x + INCON_WINDOW_SIZE; u_can_2++) {
for (int32_t v_can_2 = y - INCON_WINDOW_SIZE; v_can_2 <= y + INCON_WINDOW_SIZE; v_can_2++) {
if (u_can_2 >= 0 && v_can_2 >= 0 && u_can_2<D_can_width && v_can_2<D_can_height) {
int16_t d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width));
if (d_can_2 >= 0 && abs(d_can - d_can_2) <= INCON_THRESHOLD)
support++;
}
}
}
// invalidate support point if number of supporting points is too low
if (support<INCON_MIN_SUPPORT)
*(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1;
}
}
}
// Kernel: invalidate (set to -1) candidates that are "redundant": a candidate
// is redundant when, in BOTH directions along one axis (vertical or
// horizontal, selected by `vertical`), a similar-disparity neighbour
// (|diff| <= redun_threshold) exists within redun_max_dist cells.
// One thread per candidate-grid cell.
__global__ void removeRedundantSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height,
    int32_t redun_max_dist, int32_t redun_threshold, bool vertical) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x < D_can_width && y < D_can_height) {
        // parameters: unit step for the two search directions along one axis
        int32_t redun_dir_u[2] = { 0,0 };
        int32_t redun_dir_v[2] = { 0,0 };
        if (vertical) {
            redun_dir_v[0] = -1;
            redun_dir_v[1] = +1;
        }
        else {
            redun_dir_u[0] = -1;
            redun_dir_u[1] = +1;
        }
        int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width));
        if (d_can >= 0) {
            // check all directions for redundancy
            bool redundant = true;
            for (int32_t i = 0; i<2; i++) {
                // search for a supporting (similar-disparity) neighbour
                int32_t u_can_2 = x;
                int32_t v_can_2 = y;
                int16_t d_can_2;
                bool support = false;
                for (int32_t j = 0; j<redun_max_dist; j++) {
                    u_can_2 += redun_dir_u[i];
                    v_can_2 += redun_dir_v[i];
                    if (u_can_2<0 || v_can_2<0 || u_can_2 >= D_can_width || v_can_2 >= D_can_height)
                        break;
                    d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width));
                    if (d_can_2 >= 0 && abs(d_can - d_can_2) <= redun_threshold) {
                        support = true;
                        break;
                    }
                }
                // if we have no support => point is not redundant
                if (!support) {
                    redundant = false;
                    break;
                }
            }
            // invalidate support point if it is redundant
            // NOTE(review): concurrent invalidation of neighbours makes the
            // outcome potentially order-dependent - confirm intended.
            if (redundant)
                *(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1;
        }
    }
}
// Computes sparse support-point matches on the GPU and gathers them on the
// host. D_sup_g is the device-side view and D_sup_c the host-side view of the
// same candidate buffer (presumably pinned/mapped memory from HostMal - TODO
// confirm). Candidate-grid coords are converted back to pixel coords as
// (u_can+3, v_can+1) * D_candidate_stepsize, matching sptMathKernel's layout.
vector<Elas::support_pt> computeSupportMatches_g(uint8_t* I_desc1, uint8_t* I_desc2, \
    int8_t* D_sup_c, int8_t* D_sup_g)
{
    // create matrix for saving disparity candidates
    int32_t D_can_width = 60; //[15,310] => 60
    // NOTE(review): the comment says 46 usable rows while 48 are allocated;
    // the launch grid (GRIDY = 46) writes only 46 rows - the extra two stay
    // -1 from the memset below, so they are skipped in the gather loop.
    int32_t D_can_height = 48; //[5, 230] => 46
    gpuErrchk(hipMemset(D_sup_g, -1, D_can_width*D_can_height * sizeof(int8_t)));
    dim3 threads(BLOCKX, BLOCKY);
    dim3 grid(GRIDX, GRIDY);
    gpuErrchk(hipFuncSetCacheConfig(sptMathKernel,hipFuncCachePreferShared));
    //compute support
    // hipDeviceSynchronize(); //
    sptMathKernel << <grid, threads, 0 >> > (D_can_width, D_can_height, D_sup_g, I_desc1, I_desc2);
    //hipDeviceSynchronize(); //13ms
    gpuErrchk(hipDeviceSynchronize());
    //put D_sup to vector of support (host reads D_sup_c after the sync above)
    vector<Elas::support_pt> p_support;
    for (int32_t v_can = 0; v_can<D_can_height; v_can++)
        for (int32_t u_can = 0; u_can<D_can_width; u_can++)
            if (*(D_sup_c + u_can + v_can * D_can_width) >= 0)
                p_support.push_back(Elas::support_pt((u_can + 3)*D_candidate_stepsize,
                    (v_can + 1) * D_candidate_stepsize,
                    *(D_sup_c+ u_can + v_can * D_can_width)));
    return p_support;
}
/**
* 3. over: compute support point
* */
/**
* 4. convert dis to cloud
***/
// Kernel: back-projects one disparity pixel to a 3D point (x, y, z) using the
// hard-coded reprojection matrix Q documented at the bottom (cx = 161.21,
// cy = 119.92, f = 241.58, 1/(f*B) = 0.0066697...). Launched on a 320x200
// grid with v shifted by +20, so only rows [20, 220) are converted.
// Invalid disparities (-1) are written as the sentinel (10000, 10000, 10000).
__global__ void Convert(float *D_g, float *cloud_g)
{
    int u = blockDim.x * blockIdx.x + threadIdx.x;
    int v = blockDim.y * blockIdx.y + threadIdx.y;
    v += 20; // skip the top 20 image rows
    float w = 0, x = 0, y = 0, z = 0;
    float dis = D_g[u + v * WIDTH];
    w = 0.006669723997311648 * dis; // homogeneous scale = d / (f*B)
    // NOTE(review): for dis == 0 this divides by zero (inf/NaN is stored);
    // only dis == -1 is remapped to the sentinel below - confirm intended.
    x = (float)((u - 161.2100334167481) / w);
    y = (float)(- (v - 119.9240913391113) / w); //has bug
    z = (float)(241.57918 / w);
    *(cloud_g + (u + v * WIDTH) * 3) = x;
    *(cloud_g + (u + v * WIDTH) * 3 + 1) = y;
    *(cloud_g + (u + v * WIDTH) * 3 + 2) = z;
    if(dis == -1)
    {
        *(cloud_g + (u + v * WIDTH) * 3) = 10000;
        *(cloud_g + (u + v * WIDTH) * 3 + 1) = 10000;
        *(cloud_g + (u + v * WIDTH) * 3 + 2) = 10000;
    }
    // alternative axis order kept for reference:
    // *(cloud_g + (u + v * WIDTH) * 3) = z;
    // *(cloud_g + (u + v * WIDTH) * 3 + 1) = y;
    // *(cloud_g + (u + v * WIDTH) * 3 + 2) = x;
    // A = [u, v, d,1];
    // Q = [1, 0, 0, -161.2100334167481; 0, 1, 0, -119.9240913391113;
    // 0, 0, 0, 241.57918; 0, 0, 0.006669723997311648, 0]
}
// Launches the disparity-to-point-cloud conversion for the 320x200 region
// (the kernel offsets v by +20). Asynchronous: no synchronization happens
// here, so callers must sync before reading cloud_g on the host.
// Returns 0; launch errors are not checked here (matches existing style).
int ConvertD2Z(float* D1_g, float* cloud_g)
{
    dim3 threads(320, 1);
    dim3 grid(1, 200);
    hipLaunchKernelGGL(( Convert), dim3(grid), dim3(threads), 0, 0, D1_g, cloud_g);
    // Fix: the function is declared `int` but previously had no return
    // statement - flowing off the end of a non-void function is UB in C++.
    return 0;
}
/**
* 4. over: convert dis to cloud
***/
/****
* 5. cuda_computeD
***/
// Triangle count forwarded to the next Triangle_Match1 launch.
int tri_size = 0;
// Disparity-grid dimensions: {slots per cell (count slot + up to 64 disparities),
// cells in x, cells in y}.
__constant__ int32_t grid_dims_g[3] = {65, WIDTH/GRID_SIZE, HEIGH/GRID_SIZE} ;
// Plane-prior penalty table indexed by |d - d_plane|: negative values favour
// disparities near the triangle plane (presumably a baked-in version of the
// P table passed to Triangle_Match1 - TODO confirm).
__constant__ int8_t temp[] = {-14,-9,-2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// Kernel: dense per-pixel disparity estimation constrained by the support
// triangulation. One thread per pixel (320x1 blocks, 240 rows). Each block
// stages its scanline's descriptors (16 bytes/pixel, both images) into shared
// memory, looks up the pixel's triangle id in `tp`, evaluates the triangle's
// plane prior d = a*u + b*v + c, then searches (a) the disparity-grid
// candidates outside the prior range and (b) the prior range itself for the
// minimum-energy match. The many printf guards are debug assertions.
// NOTE(review): `tp` is int8_t, so triangle ids above 127 wrap negative -
// confirm the triangle count stays below 128 or widen the type.
__global__ void Triangle_Match1(Elas::triangle* tri, int32_t* disparity_grid,\
    uint8_t* I1_desc, uint8_t* I2_desc, int8_t* P, \
    int32_t plane_radius, bool right_image, float* D, \
    int8_t* tp, int tri_size)
{
    float plane_a = 0, plane_b = 0, plane_c = 0, plane_d = 0;
    int u = blockDim.x * blockIdx.x + threadIdx.x;
    int v = blockDim.y * blockIdx.y + threadIdx.y;
    int32_t id;
    // one descriptor scanline (320 px * 16 bytes) per image
    __shared__ uint8_t __I1_desc_share[320 * 16];
    __shared__ uint8_t __I2_desc_share[320 * 16];
    if(u >= 320)
        printf("\n+++++++++++++ u out %d\n", u);
    if(v >= 240)
        printf("\n+++++++++++++ v out %d\n", v);
    // cooperative load: each thread copies 16 bytes, strided by 320
    for(int i = 0; i < 16; i += 1 )
    {
        __I1_desc_share[u + i*320] = I1_desc[v * 320*16 + u + i*320];
        __I2_desc_share[u + i*320] = I2_desc[v * 320*16 + u + i*320];
    }
    __syncthreads();
    // triangle id for this pixel; plane_d is the other image's first plane
    // coefficient, used only for the validity test below
    id = tp[u + v * WIDTH];
    if(u + v * WIDTH > 320 * 240)
        printf("\n+++++++++++++ id1 out %d\n", u + v * WIDTH);
    if(id > tri_size)
        printf("\n+++++++++++++ id2 out %d\n", id);
    plane_a = tri[id].t1a;
    plane_b = tri[id].t1b;
    plane_c = tri[id].t1c;
    plane_d = tri[id].t2a;
    // prior is trusted only for near-fronto-parallel planes
    bool valid = fabs(plane_a)<0.7 && fabs(plane_d)<0.7;
    const int32_t window_size = 2;
    // address of disparity we want to compute
    uint32_t d_addr;
    d_addr = getAddressOffsetImage1(u, v, WIDTH);
    if(d_addr > 320 * 240)
        printf("+++++++++d_addr out %d\n", d_addr);
    uint8_t *I1_line_addr, *I2_line_addr; // NOTE(review): I1_line_addr is never used
    I2_line_addr = __I2_desc_share ;
    uint8_t* I1_block_addr = __I1_desc_share + 16 * u;
    // does this patch have enough texture?
    int32_t sum = 0; // NOTE(review): texture check below is disabled
    //int32_t match_texture = 1;
    //	//#pragma unroll
    //	for (int32_t i = 0; i<16; i++)
    //		sum += abs((int32_t)(*(I1_block_addr + i)) - 127);
    //	if (sum<match_texture)
    //		return;
    // compute disparity, min disparity and max disparity of plane prior
    int32_t d_plane = (int32_t)(plane_a*(float)u + plane_b*(float)v + plane_c);
    // int32_t d_plane = (int32_t)(0);
    int32_t d_plane_min = max(d_plane - plane_radius, 0);
    int32_t d_plane_max = min(d_plane + plane_radius, grid_dims_g[0] - 2);
    // get grid pointer: cell (grid_x, grid_y), slot 0 holds the candidate count
    int32_t grid_x = (int32_t)floor((float)u / (float)GRID_SIZE);
    int32_t grid_y = (int32_t)floor((float)v / (float)GRID_SIZE);
    //(gird_y * 16 + grid_x) * 65
    uint32_t grid_addr = getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]);
    if( (grid_addr + 1) > 65 * 12 * 16)
        printf("++++++++++ grid_addr out %d\n", grid_addr);
    int32_t num_grid = *(disparity_grid + grid_addr);
    if( num_grid > 64 )
        printf("++++++++++ num_grid out %d\n", num_grid);
    int32_t* d_grid = disparity_grid + grid_addr + 1;
    // loop variables
    int32_t d_curr, u_warp, val;
    int32_t min_val = 10000;
    int32_t min_d = -1;
    // left image: warp right (u - d); right image: warp left (u + d)
    if (!right_image) {
        #pragma unroll
        for (int32_t i = 0; i<num_grid; i++) {
            d_curr = d_grid[i];
            // grid candidates are only evaluated outside the prior range
            if (d_curr<d_plane_min || d_curr>d_plane_max) {
                u_warp = u - d_curr;
                if (u_warp<window_size || u_warp >= WIDTH - window_size)
                    continue;
                if(u_warp < 0 || u_warp > 320)
                    printf("_+++++++++ u_wrap1 out %d\n", u_warp);
                updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d);
            }
        }
        // prior range: energy is weighted by the constant penalty table `temp`
        #pragma unroll
        for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) {
            u_warp = u - d_curr;
            if (u_warp<window_size || u_warp >= WIDTH - window_size)
                continue;
            if(u_warp < 0 || u_warp > 320)
                printf("_+++++++++ u_wrap2 out %d\n", u_warp);
            // updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
            updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
        }
    }
    else {
        #pragma unroll
        for (int32_t i = 0; i<num_grid; i++) {
            d_curr = d_grid[i];
            if (d_curr<d_plane_min || d_curr>d_plane_max) {
                u_warp = u + d_curr;
                if (u_warp<window_size || u_warp >= WIDTH - window_size)
                    continue;
                if(u_warp < 0 || u_warp > 320)
                    printf("_+++++++++ u_wrap3 out %d\n", u_warp);
                updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d);
            }
        }
        #pragma unroll
        for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) {
            u_warp = u + d_curr;
            if (u_warp<window_size || u_warp >= WIDTH - window_size)
                continue;
            if(u_warp < 0 || u_warp > 320)
                printf("_+++++++++ u_wrap4 out %d\n", u_warp);
            // updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
            updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
        }
    }
    // set disparity value
    // NOTE(review): `> 0` (instead of the original >= 0) also discards
    // legitimate disparity 0 - confirm intended.
    // if (min_d >= 0){
    if (min_d > 0){
        *(D + d_addr) = (float)min_d; // MAP value (min neg-Log probability)
    }else *(D + d_addr) = -1; // invalid disparity
}
// Rasterizes each triangle of the mesh into the per-pixel triangle-id map
// `tp` (tp[u + v*width] = triangle index i). For the right image the corner
// u-coordinates are shifted left by their disparity first. Triangles are
// rasterized in two u-spans (A->B against AC, then B->C against BC) after
// sorting corners by u.
// NOTE(review): `i` (uint32_t) is compared against TRI_SIZE (int32_t) - a
// negative TRI_SIZE would loop (almost) forever; and the out-of-bounds guard
// hangs on purpose with `while(1);` - both look like debug leftovers.
void computeTrianglePoints(const vector<Elas::support_pt> &p_support, const vector<Elas::triangle> &tri, \
    bool right_image, int32_t width, int32_t TRI_SIZE, int8_t* tp) {
    // loop variables
    int32_t c1, c2, c3;
    // float plane_a, plane_b, plane_c, plane_d;
    // for all triangles do
    for (uint32_t i = 0; i<TRI_SIZE; i++) {
        int num = 0; // NOTE(review): counted nowhere any more (pointNum disabled)
        // get plane parameters
        uint32_t p_i = i * 3; // NOTE(review): unused
        // triangle corners
        c1 = tri[i].c1;
        c2 = tri[i].c2;
        c3 = tri[i].c3;
        // sort triangle corners wrt. u (ascending)
        float tri_u[3];
        if (!right_image) { // left image: corner u as-is
            tri_u[0] = p_support[c1].u;
            tri_u[1] = p_support[c2].u;
            tri_u[2] = p_support[c3].u;
        }
        else { // right image: shift corners by their disparity
            tri_u[0] = p_support[c1].u - p_support[c1].d;
            tri_u[1] = p_support[c2].u - p_support[c2].d;
            tri_u[2] = p_support[c3].u - p_support[c3].d;
        }
        float tri_v[3] = { p_support[c1].v,p_support[c2].v,p_support[c3].v };
        // insertion sort of the three corners by u
        for (uint32_t j = 0; j<3; j++) {
            for (uint32_t k = 0; k<j; k++) {
                if (tri_u[k]>tri_u[j]) {
                    float tri_u_temp = tri_u[j]; tri_u[j] = tri_u[k]; tri_u[k] = tri_u_temp;
                    float tri_v_temp = tri_v[j]; tri_v[j] = tri_v[k]; tri_v[k] = tri_v_temp;
                }
            }
        }
        // rename corners
        float A_u = tri_u[0]; float A_v = tri_v[0];
        float B_u = tri_u[1]; float B_v = tri_v[1];
        float C_u = tri_u[2]; float C_v = tri_v[2];
        // compute straight lines connecting triangle corners (slope/intercept)
        float AB_a = 0; float AC_a = 0; float BC_a = 0;
        if ((int32_t)(A_u) != (int32_t)(B_u)) AB_a = (A_v - B_v) / (A_u - B_u);
        if ((int32_t)(A_u) != (int32_t)(C_u)) AC_a = (A_v - C_v) / (A_u - C_u);
        if ((int32_t)(B_u) != (int32_t)(C_u)) BC_a = (B_v - C_v) / (B_u - C_u);
        float AB_b = A_v - AB_a*A_u;
        float AC_b = A_v - AC_a*A_u;
        float BC_b = B_v - BC_a*B_u;
        // first part (triangle corner A->B): fill columns between AC and AB
        if ((int32_t)(A_u) != (int32_t)(B_u)) {
            for (int32_t u = max((int32_t)A_u, 0); u < min((int32_t)B_u, width); u++) {
                if (!param.subsampling || u % 2 == 0) {
                    int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b);
                    int32_t v_2 = (uint32_t)(AB_a*(float)u + AB_b);
                    for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++)
                        if (!param.subsampling || v % 2 == 0)
                        {
                            // *((int16_t*)(tp + 2 * u + v * 2 * width)) = u;
                            // *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v;
                            // *(tp + 2 * u + v * 2 * width + 1) = i;
                            if(u + v * width > 320 * 240)
                            {
                                printf("hhh\n");
                                while(1);
                            }
                            *(tp + u + v * width ) = i;
                            // num++;
                        }
                }
            }
        }
        // second part (triangle corner B->C): fill columns between AC and BC
        if ((int32_t)(B_u) != (int32_t)(C_u)) {
            for (int32_t u = max((int32_t)B_u, 0); u < min((int32_t)C_u, width); u++) {
                if (!param.subsampling || u % 2 == 0) {
                    int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b);
                    int32_t v_2 = (uint32_t)(BC_a*(float)u + BC_b);
                    for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++)
                        if (!param.subsampling || v % 2 == 0)
                        {
                            // *((int16_t*)(tp + 2 * u + v * 2 * width)) = u;
                            // *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v;
                            // *(tp + 2 * u + v * 2 * width + 1) = i;
                            if(u + v * width > 320 * 240)
                            {
                                printf("hhh2\n");
                                while(1);
                            }
                            *(tp + u + v * width) = i;
                            // num++;
                        }
                }
            }
        }
        // tri[i].pointNum = num;
    }
}
// Last runtime status captured by the host wrappers below.
hipError_t err;
// Image geometry: {width, height, bytes per line}.
int32_t dims[3] = {WIDTH, HEIGH, WIDTH};
// Host driver for the dense disparity stage: rasterizes triangle-id maps on
// the CPU, uploads triangle data, launches Triangle_Match1 for both views,
// then runs the left/right consistency check and the point-cloud conversion.
// NOTE(review): tri_gpu_1/2 are hipMalloc'd and freed on every call - an
// avoidable per-frame allocation; and `0 != err` relies on hipSuccess == 0.
void cuda_computeD(int32_t* disparity_grid_1, int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support, \
    vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2, \
    float* D1, float* D2, uint8_t* I1, uint8_t* I2, int8_t* P_g,\
    int8_t *tp1_g, int8_t* tp2_g, int8_t* tp1_c, int8_t* tp2_c, float* cloud_g)
{
    int32_t width, height, bpl;
    clock_t t1, t2; // NOTE(review): unused timing leftovers
    // get width, height and bytes per line
    width = dims[0]; //
    height = dims[1];
    bpl = dims[2]; //
    // allocate memory for disparity grid
    int32_t grid_width = 16; //(int32_t)ceil((float)width / (float)20);
    int32_t grid_height = 12; //(int32_t)ceil((float)height / (float)20);
    int32_t grid_dims[3] = { 63 + 2,grid_width,grid_height };
    int32_t P_SUPPORT_SIZE = p_support.size();
    int32_t TRI_SIZE1 = tri_1.size();
    int32_t TRI_SIZE2 = tri_2.size();
    tri_size = TRI_SIZE1;
    int8_t* tp1_cpu = tp1_c;
    int8_t* tp2_cpu = tp2_c;
    int8_t *tp1_gpu = tp1_g;
    int8_t *tp2_gpu = tp2_g;
    // CPU rasterization of triangle-id maps (tp buffers are host-visible)
    computeTrianglePoints(p_support, tri_1, 0, width, TRI_SIZE1, tp1_c);
    computeTrianglePoints(p_support, tri_2, 1, width, TRI_SIZE2, tp2_c);
    Elas::triangle* tri_gpu_1, *tri_gpu_2;
    //int32_t *P_gpu = NULL; //action:::: cannot delete;
    hipMalloc((void **)&tri_gpu_1, sizeof(Elas::triangle) * TRI_SIZE1);
    hipMalloc((void **)&tri_gpu_2, sizeof(Elas::triangle) * TRI_SIZE2);
    // hipMalloc((void **)&P_gpu, sizeof(int8_t) * 64);
    err = hipGetLastError();
    if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err));
    hipMemcpy(tri_gpu_1, &tri_1[0], sizeof(Elas::triangle) * TRI_SIZE1, hipMemcpyHostToDevice);
    hipMemcpy(tri_gpu_2, &tri_2[0], sizeof(Elas::triangle) * TRI_SIZE2, hipMemcpyHostToDevice);
    //hipMemcpy(P_gpu, P_g, sizeof(int8_t) * 64, hipMemcpyHostToDevice);
    err = hipGetLastError();
    if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err));
    int32_t plane_radius = 2; //(int32_t)max((float)ceil(param.sigma*param.sradius), (float)2.0);
    dim3 threads(320, 1);
    dim3 grid(1, 240);
    // hipDeviceSynchronize();
    err = hipGetLastError();
    if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err));
    printf("goin Triangle_match kernel\n");
    // left then right view; tri_size is captured by value at each launch
    Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_1, disparity_grid_1, \
        I1, I2, P_g, plane_radius, 0, D1, tp1_g, tri_size);
    tri_size = TRI_SIZE2;
    Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_2, disparity_grid_2, \
        I2, I1, P_g, plane_radius, 1, D2, tp2_g, tri_size);
    err = hipGetLastError();
    if(0 != err) printf("Triangle_Match1 cuda error: %s\n", hipGetErrorString(err));
    gpuErrchk(hipDeviceSynchronize());
    //if(0 != err) printf("Triangle_Match1 cuda error: %s\n", hipGetErrorString(err));
    hipLaunchKernelGGL(( leftRightConsistencyCheck), dim3(grid), dim3(threads), 0, 0, D1, D2);
    // hipDeviceSynchronize();
    dim3 threads2(320, 1);
    dim3 grid2(1, 200);
    hipLaunchKernelGGL(( Convert), dim3(grid2), dim3(threads2), 0, 0, D1, cloud_g);
    hipFree((void*)tri_gpu_1);
    hipFree((void*)tri_gpu_2);
    // hipFree((void*)P_gpu);
    err =hipDeviceSynchronize();
    gpuErrchk(err);
}
/****
* 5. over: cuda_computeD
***/
/***
* 6. leftRightConsistencyCheck
* */
// Kernel: left/right consistency check. A left-image disparity d1 at (u, v)
// survives only if the right image's disparity at the warped column u - d1
// agrees within 2 px; otherwise (or when d1 is invalid / warps out of the
// image) D1 is invalidated to -1. D2_g is read-only here; the symmetric
// right-image check (u_warp_2) is computed but currently unused.
__global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g)
{
    int u = blockDim.x * blockIdx.x + threadIdx.x;
    int v = blockDim.y * blockIdx.y + threadIdx.y;
    // __shared__ float I_du_share[320];
    // __shared__ float I_dv_share[320];
    uint32_t addr = v * WIDTH + u;
    float d1 = *(D1_g + addr);
    float d2 = *(D2_g + addr);
    float u_warp_1 = u - d1;
    float u_warp_2 = u + d2; // unused: right-image check disabled
    if(d1 >= 0 && u_warp_1 >= 0 && u_warp_1 < WIDTH)
    {
        uint32_t addr_warp = v * WIDTH + (int32_t)u_warp_1;
        if(fabs(*(D2_g + addr_warp) - d1) > 2 ) //|| (*(D2_g + addr_warp) - d1) < -2)
            *(D1_g + addr) = -1;
    }else
        *(D1_g + addr) = -1;
}
/***
* 6. leftRightConsistencyCheck
* */
| 13c256e858778966a552b6e725f3a3e52fd3dc5d.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include "cv.h"
#include "highgui.h"
#include "elas.h"
#include <vector>
#include "triangle.h"
#include "matrix.h"
#include <stdlib.h>
using namespace std;
/***
* 1. cudaHostAlloc and cudaFreeHost wrap
* 2. create Descriptor of two img
* 3. compute support point
* 4. convert dis to cloud
* 5. cuda_computeD
* 6. leftRightConsistencyCheck
* */
// Error-checking wrapper: use as gpuErrchk(cudaMalloc(...)). On failure it
// prints the call site and error string, then exits (unless abort == false).
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if(code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if(abort) exit(code);
    }
}
// Fixed image geometry (QVGA) and disparity-grid cell size in pixels.
#define WIDTH 320
#define HEIGH 240
#define GRID_SIZE 20
enum setting { ROBOTICS, MIDDLEBURY };
// parameter set (global libelas parameter bundle)
Elas::parameters param;
// Per-triangle plane coefficients: t1a/t1b/t1c give d = a*u + b*v + c;
// t2a is presumably the other view's first coefficient - TODO confirm
// against Elas::triangle, which Triangle_Match1 actually reads.
struct plane {
    float t1a, t1b, t1c;
    float t2a;
};
// forward declaration (defined at the end of this file)
__global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g);
/**
* 1. cudaHostAlloc and cudaFreeHost wrap
*
* */
// Counters pairing pinned-host allocations and frees; compared via
// allocFreeCount() to spot leaks.
static int count_alloc = 0;
static int count_free = 0;
// Frees pinned host memory allocated via HostMal() and reports failures.
// Also increments the free counter used by allocFreeCount().
void cudaFreeHost_cpuaa(void *p)
{
    count_free++;
    // Fix: check cudaFreeHost's own return value instead of
    // cudaGetLastError(), which can also report an unrelated earlier
    // (sticky) error and misattribute it to this call.
    cudaError_t err = cudaFreeHost(p);
    if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err));
}
// Enables mapped (zero-copy) pinned allocations; must run before the CUDA
// context is created (i.e. before any other runtime call), since
// cudaHostGetDevicePointer in HostMal() depends on this flag.
void SetDeviceMap()
{
    // Fix: the return value was silently discarded; report failures in the
    // same style as the rest of the file.
    cudaError_t err = cudaSetDeviceFlags(cudaDeviceMapHost);
    if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err));
}
// Allocates `size` bytes of pinned host memory, stores the host pointer in
// *p, and returns the corresponding device pointer. Increments the
// allocation counter used by allocFreeCount().
// NOTE(review): cudaHostGetDevicePointer normally requires the allocation to
// be mapped (cudaHostAllocMapped, or cudaDeviceMapHost + SetDeviceMap());
// here only cudaHostAllocDefault is passed - confirm SetDeviceMap() is
// always called first.
void* HostMal(void **p, long size)
{
    count_alloc++;
    void *p_g;
    // cudaHostAlloc((void**)p,size, cudaHostAllocDefault | cudaHostAllocMapped);
    cudaHostAlloc((void**)p, size, cudaHostAllocDefault );
    cudaError_t err = cudaGetLastError();
    if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err));
    // convert the plain host pointer into a pointer usable from device code
    cudaHostGetDevicePointer(&p_g, *p, 0);
    err = cudaGetLastError();
    if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err));
    return p_g;
}
// Dumps the pinned-allocation bookkeeping: the two numbers should match
// when every HostMal() has been paired with a cudaFreeHost_cpuaa().
void allocFreeCount()
{
    const int allocations = count_alloc;
    const int frees = count_free;
    printf("count_alloc = %d\n", allocations);
    printf("count_free= %d\n", frees);
}
/**
* 1. over: cudaHostAlloc and cudaFreeHost wrap
* */
//dim3 threads(320 - 6 , 1);
//dim3 grid( 1, 240 -6 ); => (0, 233)
/**
* 2. create Descriptor of two img
* */
// Kernel: builds the 16-byte feature descriptor for one interior pixel
// (x, y) from Sobel gradient images: 12 taps from the horizontal gradient
// I_du (rows y-2..y+2) and 4 taps from the vertical gradient I_dv
// (rows y-1..y+1). Launched with one 314-thread row per block over the
// interior (3-pixel border excluded); u/v are interior coords, x/y absolute.
// Each block first stages the needed gradient rows in shared memory.
__global__ \
void createDesc_gpu_kernel(uint8_t* I_desc, uint8_t* I_du, uint8_t* I_dv)
{
    int u = blockDim.x * blockIdx.x + threadIdx.x;
    int v = blockDim.y * blockIdx.y + threadIdx.y;
    int x = u + 3;
    int y = v + 3;
    // 5 rows of I_du (y-2..y+2) and 3 rows of I_dv (y-1..y+1)
    __shared__ uint8_t I_du_share[320 * 5];
    __shared__ uint8_t I_dv_share[320 * 3];
    uint8_t *I_desc_curr;
    for(int i = 0; i < 5; i++){
        *(I_du_share + x + i * 320) = *(I_du + x + (y-2 + i) * 320);
    }
    for(int i = 0; i < 3; i++){
        *(I_dv_share + x + i * 320) = *(I_dv + x + (y-1 + i) * 320);
    }
    __syncthreads();
    // write the 16 descriptor bytes for pixel (x, y)
    // NOTE(review): the centre du tap is written twice (double weight, as in
    // libelas), but the "+2" on the centre row and the dv tap order deviate
    // from libelas' reference pattern - confirm these are intentional.
    I_desc_curr = I_desc + (y* WIDTH + x) * 16;
    *(I_desc_curr++) = *(I_du_share + (0 * WIDTH + x + 0) );
    *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x - 2) );
    *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 0) );
    *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 2) );
    *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x - 1) );
    *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) );
    *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) );
    *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 2) );
    *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x - 2) );
    *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 0) );
    *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 2) );
    *(I_desc_curr++) = *(I_du_share + (4 * WIDTH + x + 0) );
    *(I_desc_curr++) = *(I_dv_share + (0 * WIDTH + x + 0) );
    *(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x + 1) );
    *(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x - 1) );
    *(I_desc_curr++) = *(I_dv_share + (2 * WIDTH + x + 0) );
}
// Host wrapper: builds the 16-byte-per-pixel descriptor image from the
// gradient images for the interior region (3-pixel border skipped), and
// blocks until the kernel finishes (gpuErrchk aborts on kernel failure).
// Returns 0 on success. The cout lines are debug traces kept as-is.
int __createDesc_gpu(uint8_t* I_desc, uint8_t* I_du_g, uint8_t* I_dv_g )
{
    dim3 threads(WIDTH - 6 , 1);
    dim3 grid( 1, HEIGH - 6 );
    cout<< "create Desc"<<endl;
    createDesc_gpu_kernel<<<grid, threads, 0 >>>(I_desc, I_du_g, I_dv_g );
    cout<< "create Desc1"<<endl;
    gpuErrchk(cudaDeviceSynchronize());
    cout<< "create Desc2"<<endl;
    // Fix: function is declared `int` but previously had no return
    // statement - flowing off the end of a non-void function is UB in C++.
    return 0;
}
/**
* 2. over: create Descriptor of two img
* */
/**
* 3. compute support point
* */
// Row-major pixel offset: (u, v) -> u + v * width.
__device__ \
uint32_t getAddressOffsetImage1(const int32_t& u, const int32_t& v, const int32_t& width)
{
    return u + v * width;
}
// Sum of absolute differences over the 16 descriptor bytes starting at
// `offset` in each buffer (bytes promote to int, so abs is exact).
__device__ \
unsigned int computeMatchEnergy1(unsigned char* dst1, unsigned char* dst2, int offset)
{
    unsigned int sad = 0;
    #pragma unroll
    for (int i = 0; i < 16; i++)
        sad += abs(*(dst1 + offset + i) - *(dst2 + offset + i));
    return sad;
}
// Flattened offset into the disparity grid: cell (x, y) with disp_num
// slots per cell, slot d.
__device__ \
uint32_t getAddressOffsetGrid1(const int32_t& x, const int32_t& y, \
    const int32_t& d, const int32_t& width, const int32_t& disp_num)
{
    return d + disp_num * (x + y * width);
}
// Evaluates the SAD between two descriptor blocks and records disparity d
// as the new best candidate when its energy beats the current minimum.
__device__ \
void updatePosteriorMinimumNew(unsigned char* dst1, unsigned char* dst2, const int32_t &d, int32_t &val, int32_t &min_val, int32_t &min_d)
{
    val = computeMatchEnergy1(dst1, dst2, 0);
    if (val >= min_val)
        return;
    min_val = val;
    min_d = d;
}
// Same as updatePosteriorMinimumNew, but adds the (signed) prior weight w
// to the match energy before comparing against the current minimum.
__device__ \
void updatePosteriorMinimumNew1(unsigned char* dst1, unsigned char* dst2, const int32_t &d, const int8_t w, int32_t &val, int32_t &min_val, int32_t &min_d)
{
    val = computeMatchEnergy1(dst1, dst2, 0) + w;
    if (val >= min_val)
        return;
    min_val = val;
    min_d = d;
}
// Integer ceiling division (grid-size helper): rounds a/b up whenever the
// division leaves a remainder.
int iDivUp(int a, int b)
{
    int quotient = a / b;
    if (a % b != 0)
        quotient += 1;
    return quotient;
}
// SAD over four 16-byte descriptor blocks at once: columns u (image 1) vs
// u_wrap (image 2), each taken two pixels left (-32 bytes) and two pixels
// right (+32 bytes) of the column, in both descriptor rows (dst*_1/dst*_2).
// NOTE(review): only referenced from commented-out code in sptMathKernel -
// presumably an experimental fused variant; confirm before removing.
__device__ \
unsigned int computeMatchEnergy1_new(unsigned char* dst1_1, unsigned char* dst1_2, unsigned char* dst2_1, unsigned char* dst2_2, int32_t u, int32_t u_wrap) {
    unsigned int r0, r1, r2, r3;
    r0 = 0;
    r1 = 0;
    r2 = 0;
    r3 = 0;
    #pragma unroll
    for (int i = 0; i < 16; i++) {
        r0 += abs(dst2_1[(u_wrap << 4) - 32 + i] - dst1_1[(u << 4) - 32 + i]);
        r1 += abs(dst2_1[(u_wrap << 4) + 32 + i] - dst1_1[(u << 4) + 32 + i]);
        r2 += abs(dst2_2[(u_wrap << 4) - 32 + i] - dst1_2[(u << 4) - 32 + i]);
        r3 += abs(dst2_2[(u_wrap << 4) + 32 + i] - dst1_2[(u << 4) + 32 + i]);
    }
    return r0 + r1 + r2 + r3;
}
// Support-matching configuration.
#define D_candidate_stepsize 5   // candidate-grid spacing in pixels
#define INCON_THRESHOLD 5        // max disparity diff counted as consistent
#define INCON_MIN_SUPPORT 5      // min consistent neighbours to keep a point
#define INCON_WINDOW_SIZE 5      // half-width of the consistency window
#define SUPPORT_TEXTURE 10
#define DISP_MIN 0
#define DISP_MAX 63
#define SUPPORT_THRESHOLD 0.85   // best/second-best energy ratio test
#define U_STEP 2
#define V_STEP 2
#define WINDOW_SIZE 3
#define MIN_1_E 32767            // int16 "infinity" initialisers for the
#define MIN_1_D -1               // two best energies / disparities
#define MIN_2_E 32767
#define MIN_2_D -1
// byte offsets of the descriptors U_STEP pixels left/right of the column
#define DESC_OFFSET_1 (-16 * U_STEP)
#define DESC_OFFSET_2 (+16 * U_STEP)
#define DESC_OFFSET_3 (-16 * U_STEP)
#define DESC_OFFSET_4 (+16 * U_STEP)
// sptMathKernel launch shape: 60x1 threads, 1x46 blocks
#define BLOCKX 60
#define BLOCKY 1
#define GRIDX 1
#define GRIDY 46
//#define GRIDY 2
//dim3 threads(60, 1);
//dim3 grid(1, 46);
// bytes per descriptor scanline (320 pixels * 16 bytes)
__constant__ uint32_t oneLine = WIDTH * 16;
// Kernel (16 threads): per-byte SAD contribution between the descriptors of
// column u (image 1) and u_wrap (image 2), summed over the rows stored in
// the two halves of the shared descriptor buffers (v-2 in the first half,
// v+2 at offset oneLine). Thread x handles descriptor byte x and writes its
// partial sum to sumResult[x].
// NOTE: appears unused by the current pipeline (sptMathKernel inlines this).
__global__ void compEner_gpu(uint8_t* I1_desc_shared, uint8_t* I2_desc_shared, int u, int u_wrap, uint32_t* sumResult)
{
    int x = threadIdx.x; // x = (0,15)
    int32_t sum = 0;
    // Fix: '<<' binds looser than '+', so "(u - 2) << 4 + x" parsed as
    // "(u - 2) << (4 + x)" - a garbage index. Parenthesize the shift so the
    // index is byte x of descriptor (u - 2), i.e. ((u - 2) * 16) + x.
    sum = abs(I1_desc_shared[((u - 2) << 4) + x] - I2_desc_shared[((u_wrap - 2) << 4) + x]);
    sum += abs(I1_desc_shared[((u + 2) << 4) + x] - I2_desc_shared[((u_wrap + 2) << 4) + x]);
    sum += abs(I1_desc_shared[((u + 2) << 4) + x + oneLine] - I2_desc_shared[((u_wrap + 2) << 4) + x + oneLine]);
    sum += abs(I1_desc_shared[((u - 2) << 4) + x + oneLine] - I2_desc_shared[((u_wrap - 2) << 4) + x + oneLine]);
    sumResult[x] = sum;
}
// Kernel: support-point matching. One thread per candidate-grid cell
// (x, y) -> pixel (u, v) = ((x+3)*step, (y+1)*step). Each 60-thread block
// stages the descriptor scanlines v-2*step and v+2*step of both images into
// shared memory, then does a left->right search for the best disparity d1
// (winner-take-all with a 0.85 best/second-best ratio test) followed by a
// right->left back-match d2; the candidate is accepted if |d2 - d1| <= 2.
// NOTE(review): the staging loop copies 85 * BLOCKX = 5100 of the 5120 bytes
// per scanline half - the last 20 bytes stay uninitialized; confirm the
// rightmost candidate columns never read them.
__global__ void sptMathKernel(int32_t D_can_width, int32_t D_can_height, int8_t* D_can, uint8_t* desc1, uint8_t* desc2)
{
    int32_t u_wrap;
    int disp_max_valid;
    int result1 = 0, result2 = 0, result3 = 0, result4 = 0;
    int32_t line_offset;
    uint8_t *I1_line_addr, *I2_line_addr, *I1_block_addr, *I2_block_addr, *I_line_addr_tmp;
    uint8_t *I1_block_addr_1, *I1_block_addr_2, *I2_block_addr_1, *I2_block_addr_2;
    int32_t sum = 0;
    int16_t min_1_E;
    int16_t min_1_d;
    int16_t min_2_E;
    int16_t min_2_d;
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int u, v, d1 = -1 , d2 = -1;
    u = (x + 3) * D_candidate_stepsize; //5
    v = (y + 1) * D_candidate_stepsize;
    line_offset = 16 * WIDTH*v;
    I1_line_addr = desc1 + line_offset;
    I2_line_addr = desc2 + line_offset;
    // scanlines v - 2*step (first half) and v + 2*step (second half)
    __shared__ uint8_t I1_desc_shared[320 * 16 * 2];
    __shared__ uint8_t I2_desc_shared[320 * 16 * 2];
    for(int i = 0; i < 85; i++){
        I1_desc_shared[x + i * BLOCKX ] = *(I1_line_addr + x + i * BLOCKX - 2 * oneLine);
        I1_desc_shared[x + i * BLOCKX + oneLine] = *(I1_line_addr + x + i * BLOCKX + 2 * oneLine);
        I2_desc_shared[x + i * BLOCKX ] = *(I2_line_addr + x + i * BLOCKX - 2 * oneLine);
        I2_desc_shared[x + i * BLOCKX + oneLine] = *(I2_line_addr + x + i * BLOCKX + 2 * oneLine);
    }
    __syncthreads();
    // texture check: skip flat patches (descriptor deviates little from 128)
    for (int32_t i=0; i<16; i++)
        sum += abs((int32_t)(*(I1_line_addr + 16 * u +i))-128);
    if (sum<10){
        d1 = -1;
        return;
    }
    I1_block_addr_1 = I1_desc_shared + 16 * u ;
    I1_block_addr_2 = I1_desc_shared + 16 * u + oneLine ;
    // left->right: d limited so the warped column stays inside the image
    disp_max_valid = min(63, u - 5);
    min_1_E = MIN_1_E;
    min_1_d = MIN_1_D;
    min_2_E = MIN_2_E;
    min_2_d = MIN_2_D;
    for (int16_t d = 0; d <= disp_max_valid; d++) {
        u_wrap = u - d;
        I2_block_addr_1 = I2_desc_shared + 16 * u_wrap;
        I2_block_addr_2 = I2_desc_shared + 16 * u_wrap + oneLine;
        // result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1);
        // result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2);
        // result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3);
        // NOTE(review): only one of the four descriptor comparisons is
        // active - presumably a deliberate speed/quality trade-off; confirm.
        result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4);
        // sum = result1 + result2 + result3 + result4;
        sum = result4;
        // track the two lowest energies for the ratio test
        if (sum<min_1_E) {
            min_2_E = min_1_E;
            min_2_d = min_1_d;
            min_1_E = sum;
            min_1_d = d;
        }
        else if (sum<min_2_E) {
            min_2_E = sum;
            min_2_d = d;
        }
    }
    if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E)
        d1 = min_1_d;
    sum = 0;
    // right->left back-match from the matched right-image column u - d1
    if (d1 >= 0){
        min_1_E = MIN_1_E;
        min_1_d = MIN_1_D;
        min_2_E = MIN_2_E;
        min_2_d = MIN_2_D;
        u = u - d1;
        disp_max_valid = min(63, WIDTH - u - 5);
        I2_block_addr_1 = I2_desc_shared + 16 * u;
        I2_block_addr_2 = I2_desc_shared + 16 * u + 320 * 16;
        sum = 0;
        #pragma unroll
        for (int32_t i=0; i<16; i++)
            // NOTE(review): reads column 0 of the scanline, not column u -
            // presumably should be *(I2_line_addr + 16 * u + i); confirm.
            sum += abs((int32_t)(*(I2_line_addr+i))-128);
        if (sum<10){
            return;
        }
        sum = 0;
        for(int16_t d = 0; d <= disp_max_valid; d++){
            u_wrap = u + d;
            I1_block_addr_1 = I1_desc_shared + 16 * u_wrap;
            I1_block_addr_2 = I1_desc_shared + 16 * u_wrap + 320 * 16;
            // result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1);
            // result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2);
            // result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3);
            result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4);
            // sum = result1 + result2 + result3 + result4;
            sum = result4;
            // sum = computeMatchEnergy1_new(I2_desc_shared, I2_desc_shared + oneLine, I1_desc_shared, I1_desc_shared + oneLine, u, u_wrap);
            if (sum<min_1_E) {
                min_2_E = min_1_E;
                min_2_d = min_1_d;
                min_1_E = sum;
                min_1_d = d;
            }
            else if (sum<min_2_E) {
                min_2_E = sum;
                min_2_d = d;
            }
        }
        if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E)
            d2 = min_1_d;
        // accept only mutually consistent matches
        if( d2 >= 0 && abs(d2 - d1) <= 2 )
            D_can[x + y * D_can_width] = d1;
    }
}
// Adds the four image corners (plus two derived right-image corners) as
// extra support points so the Delaunay triangulation spans the full image.
// Each corner copies the disparity of its nearest support point (squared
// Euclidean distance); with an empty p_support the corner keeps d = 0.
void addCornerSupportPoints(vector<Elas::support_pt> &p_support, int32_t width, int32_t height) {
    // list of border points, disparity initialised to 0
    vector<Elas::support_pt> p_border;
    p_border.push_back(Elas::support_pt(0, 0, 0));
    p_border.push_back(Elas::support_pt(0, height - 1, 0));
    p_border.push_back(Elas::support_pt(width - 1, 0, 0));
    p_border.push_back(Elas::support_pt(width - 1, height - 1, 0));
    // find closest d via brute-force nearest neighbour
    for (int32_t i = 0; i<p_border.size(); i++) {
        int32_t best_dist = 10000000;
        for (int32_t j = 0; j<p_support.size(); j++) {
            int32_t du = p_border[i].u - p_support[j].u;
            int32_t dv = p_border[i].v - p_support[j].v;
            int32_t curr_dist = du*du + dv*dv;
            if (curr_dist<best_dist) {
                best_dist = curr_dist;
                p_border[i].d = p_support[j].d;
            }
        }
    }
    // for right image: the two right-hand corners shifted by their disparity
    p_border.push_back(Elas::support_pt(p_border[2].u + p_border[2].d, p_border[2].v, p_border[2].d));
    p_border.push_back(Elas::support_pt(p_border[3].u + p_border[3].d, p_border[3].v, p_border[3].d));
    // add border points to support points
    for (int32_t i = 0; i<p_border.size(); i++)
        p_support.push_back(p_border[i]);
}
// Kernel: invalidate (set to -1) every disparity candidate that is not
// backed by at least INCON_MIN_SUPPORT neighbours, inside a
// (2*INCON_WINDOW_SIZE+1)^2 window on the candidate grid, whose disparity
// differs by at most INCON_THRESHOLD. One thread per candidate cell.
// Fix: removed unused locals `int u, v;`.
__global__ void removeInconsistentSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x < D_can_width && y < D_can_height) {
        int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width));
        if (d_can >= 0) {
            // count neighbours with a consistent disparity
            int32_t support = 0;
            for (int32_t u_can_2 = x - INCON_WINDOW_SIZE; u_can_2 <= x + INCON_WINDOW_SIZE; u_can_2++) {
                for (int32_t v_can_2 = y - INCON_WINDOW_SIZE; v_can_2 <= y + INCON_WINDOW_SIZE; v_can_2++) {
                    if (u_can_2 >= 0 && v_can_2 >= 0 && u_can_2<D_can_width && v_can_2<D_can_height) {
                        int16_t d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width));
                        if (d_can_2 >= 0 && abs(d_can - d_can_2) <= INCON_THRESHOLD)
                            support++;
                    }
                }
            }
            // invalidate support point if number of supporting points is too low
            if (support<INCON_MIN_SUPPORT)
                *(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1;
        }
    }
}
// Kernel: invalidate (set to -1) candidates that are "redundant": a
// candidate is redundant when, in BOTH directions along one axis (vertical
// or horizontal, selected by `vertical`), a similar-disparity neighbour
// (|diff| <= redun_threshold) lies within redun_max_dist cells.
// One thread per candidate-grid cell.
__global__ void removeRedundantSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height,
    int32_t redun_max_dist, int32_t redun_threshold, bool vertical) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x < D_can_width && y < D_can_height) {
        // parameters: unit step for the two search directions
        int32_t redun_dir_u[2] = { 0,0 };
        int32_t redun_dir_v[2] = { 0,0 };
        if (vertical) {
            redun_dir_v[0] = -1;
            redun_dir_v[1] = +1;
        }
        else {
            redun_dir_u[0] = -1;
            redun_dir_u[1] = +1;
        }
        int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width));
        if (d_can >= 0) {
            // check all directions for redundancy
            bool redundant = true;
            for (int32_t i = 0; i<2; i++) {
                // search for a supporting (similar-disparity) neighbour
                int32_t u_can_2 = x;
                int32_t v_can_2 = y;
                int16_t d_can_2;
                bool support = false;
                for (int32_t j = 0; j<redun_max_dist; j++) {
                    u_can_2 += redun_dir_u[i];
                    v_can_2 += redun_dir_v[i];
                    if (u_can_2<0 || v_can_2<0 || u_can_2 >= D_can_width || v_can_2 >= D_can_height)
                        break;
                    d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width));
                    if (d_can_2 >= 0 && abs(d_can - d_can_2) <= redun_threshold) {
                        support = true;
                        break;
                    }
                }
                // if we have no support => point is not redundant
                if (!support) {
                    redundant = false;
                    break;
                }
            }
            // invalidate support point if it is redundant
            if (redundant)
                *(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1;
        }
    }
}
// Computes sparse support-point matches on the GPU and gathers them on the
// host. D_sup_g is the device-side view and D_sup_c the host-side view of
// the same candidate buffer (presumably pinned/mapped memory from HostMal -
// TODO confirm). Grid coords map to pixels as (u_can+3, v_can+1) * stepsize.
vector<Elas::support_pt> computeSupportMatches_g(uint8_t* I_desc1, uint8_t* I_desc2, \
    int8_t* D_sup_c, int8_t* D_sup_g)
{
    // create matrix for saving disparity candidates
    int32_t D_can_width = 60; //[15,310] => 60
    // NOTE(review): 48 rows allocated but the launch grid (GRIDY = 46)
    // writes only 46; the extra two rows stay -1 from the memset below.
    int32_t D_can_height = 48; //[5, 230] => 46
    gpuErrchk(cudaMemset(D_sup_g, -1, D_can_width*D_can_height * sizeof(int8_t)));
    dim3 threads(BLOCKX, BLOCKY);
    dim3 grid(GRIDX, GRIDY);
    gpuErrchk(cudaFuncSetCacheConfig(sptMathKernel,cudaFuncCachePreferShared));
    //compute support
    // cudaDeviceSynchronize(); //
    sptMathKernel << <grid, threads, 0 >> > (D_can_width, D_can_height, D_sup_g, I_desc1, I_desc2);
    //cudaDeviceSynchronize(); //13ms
    gpuErrchk(cudaDeviceSynchronize());
    //put D_sup to vector of support (host reads D_sup_c after the sync above)
    vector<Elas::support_pt> p_support;
    for (int32_t v_can = 0; v_can<D_can_height; v_can++)
        for (int32_t u_can = 0; u_can<D_can_width; u_can++)
            if (*(D_sup_c + u_can + v_can * D_can_width) >= 0)
                p_support.push_back(Elas::support_pt((u_can + 3)*D_candidate_stepsize,
                    (v_can + 1) * D_candidate_stepsize,
                    *(D_sup_c+ u_can + v_can * D_can_width)));
    return p_support;
}
/**
* 3. over: compute support point
* */
/**
* 4. convert dis to cloud
***/
// Back-projects a disparity map into a 3-D point cloud (x, y, z triples).
// Callers in this file launch with block (320,1) and grid (1,200); the
// `v += 20` shift therefore covers image rows [20, 220).
// Invalid disparities (dis == -1) are parked at (10000, 10000, 10000) so
// downstream consumers can ignore them.
// Fix vs. original: the invalid case is handled first, so we no longer
// compute (and briefly store) coordinates divided by a negative disparity
// before overwriting them; the final memory state is unchanged.
__global__ void Convert(float *D_g, float *cloud_g)
{
	int u = blockDim.x * blockIdx.x + threadIdx.x;
	int v = blockDim.y * blockIdx.y + threadIdx.y;
	v += 20;
	const int pix = u + v * WIDTH;
	const int out = pix * 3;
	float dis = D_g[pix];
	if (dis == -1)
	{
		// invalid disparity: write the far-away sentinel and stop
		cloud_g[out]     = 10000;
		cloud_g[out + 1] = 10000;
		cloud_g[out + 2] = 10000;
		return;
	}
	// Reprojection with the Q matrix documented below.
	// NOTE(review): dis == 0 still yields w == 0 and a division by zero
	// (same as the original) -- confirm upstream guarantees dis != 0.
	float w = 0.006669723997311648 * dis;                // Q(3,2) * d
	float x = (float)((u - 161.2100334167481) / w);      // (u - cx) / w
	float y = (float)(- (v - 119.9240913391113) / w);    // -(v - cy) / w; original notes "has bug" on the sign
	float z = (float)(241.57918 / w);                    // f / w
	cloud_g[out]     = x;
	cloud_g[out + 1] = y;
	cloud_g[out + 2] = z;
	// A = [u, v, d, 1];
	// Q = [1, 0, 0, -161.2100334167481; 0, 1, 0, -119.9240913391113;
	//      0, 0, 0, 241.57918; 0, 0, 0.006669723997311648, 0]
}
// Host wrapper: launches Convert over image rows [20, 220)
// (grid (1,200) x block (320,1); see Convert's `v += 20`).
// Returns 0 (cudaSuccess) on a successful launch, otherwise the
// cudaError_t value of the launch failure.
// NOTE: the launch is asynchronous -- callers must synchronize before
// reading cloud_g on the host.
int ConvertD2Z(float* D1_g, float* cloud_g)
{
	dim3 threads(320, 1);
	dim3 grid(1, 200);
	Convert<<<grid, threads>>>(D1_g, cloud_g);
	// Fix: the original fell off the end of a non-void function (undefined
	// behaviour). Report the launch status; cudaPeekAtLastError() does not
	// clear the sticky error, so the cudaGetLastError() checks elsewhere in
	// this file still see it.
	return (int)cudaPeekAtLastError();
}
/**
* 4. over: convert dis to cloud
***/
/****
* 5. cuda_computeD
***/
int tri_size = 0;
__constant__ int32_t grid_dims_g[3] = {65, WIDTH/GRID_SIZE, HEIGH/GRID_SIZE} ;
__constant__ int8_t temp[] = {-14,-9,-2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// Dense disparity estimation from the triangulated support mesh.
// Launch: block (320,1), grid (1,240) -- exactly one thread per pixel (u,v)
// and one image row per block; the shared-memory staging below relies on
// that shape (each thread copies 16 descriptor bytes of its own column).
//   tri            : per-triangle plane parameters (t1a/t1b/t1c and t2a used)
//   disparity_grid : per 20x20 grid cell, [count, candidate disparities...]
//   tp             : per-pixel triangle id built by computeTrianglePoints
//   plane_radius   : half-width of the search band around the plane disparity
//   right_image    : warp direction (u - d for left image, u + d for right)
//   D              : output disparity map; -1 marks "no match"
// NOTE(review): parameter P is unused -- the __constant__ `temp` prior table
// is used instead (see the commented-out updatePosteriorMinimumNew1 calls).
// The printf guards are debug assertions left in by the author.
__global__ void Triangle_Match1(Elas::triangle* tri, int32_t* disparity_grid,\
	uint8_t* I1_desc, uint8_t* I2_desc, int8_t* P, \
	int32_t plane_radius, bool right_image, float* D, \
	int8_t* tp, int tri_size)
{
	float plane_a = 0, plane_b = 0, plane_c = 0, plane_d = 0;
	int u = blockDim.x * blockIdx.x + threadIdx.x;
	int v = blockDim.y * blockIdx.y + threadIdx.y;
	int32_t id;
	// stage this row's 16-byte-per-pixel feature descriptors in shared memory
	__shared__ uint8_t __I1_desc_share[320 * 16];
	__shared__ uint8_t __I2_desc_share[320 * 16];
	if(u >= 320)
		printf("\n+++++++++++++ u out %d\n", u);
	if(v >= 240)
		printf("\n+++++++++++++ v out %d\n", v);
	// cooperative load: 320 threads, each copies 16 bytes with stride 320
	for(int i = 0; i < 16; i += 1 )
	{
		__I1_desc_share[u + i*320] = I1_desc[v * 320*16 + u + i*320];
		__I2_desc_share[u + i*320] = I2_desc[v * 320*16 + u + i*320];
	}
	__syncthreads();
	// plane parameters of the triangle covering this pixel
	id = tp[u + v * WIDTH];
	if(u + v * WIDTH > 320 * 240)
		printf("\n+++++++++++++ id1 out %d\n", u + v * WIDTH);
	if(id > tri_size)
		printf("\n+++++++++++++ id2 out %d\n", id);
	plane_a = tri[id].t1a;
	plane_b = tri[id].t1b;
	plane_c = tri[id].t1c;
	plane_d = tri[id].t2a;
	// the plane prior is only trusted when both slopes are moderate
	bool valid = fabs(plane_a)<0.7 && fabs(plane_d)<0.7;
	const int32_t window_size = 2;
	// address of the disparity we want to compute
	uint32_t d_addr;
	d_addr = getAddressOffsetImage1(u, v, WIDTH);
	if(d_addr > 320 * 240)
		printf("+++++++++d_addr out %d\n", d_addr);
	uint8_t *I1_line_addr, *I2_line_addr;
	I2_line_addr = __I2_desc_share ;
	uint8_t* I1_block_addr = __I1_desc_share + 16 * u;
	// does this patch have enough texture? (texture check disabled below)
	int32_t sum = 0;
	//int32_t match_texture = 1;
	// //#pragma unroll
	// for (int32_t i = 0; i<16; i++)
	// sum += abs((int32_t)(*(I1_block_addr + i)) - 127);
	// if (sum<match_texture)
	// return;
	// compute disparity, min disparity and max disparity of plane prior
	int32_t d_plane = (int32_t)(plane_a*(float)u + plane_b*(float)v + plane_c);
	int32_t d_plane_min = max(d_plane - plane_radius, 0);
	int32_t d_plane_max = min(d_plane + plane_radius, grid_dims_g[0] - 2);
	// get grid pointer: cell (grid_x, grid_y) holds its candidate count at
	// offset 0 followed by the candidate disparities
	int32_t grid_x = (int32_t)floor((float)u / (float)GRID_SIZE);
	int32_t grid_y = (int32_t)floor((float)v / (float)GRID_SIZE);
	//(gird_y * 16 + grid_x) * 65
	uint32_t grid_addr = getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]);
	if( (grid_addr + 1) > 65 * 12 * 16)
		printf("++++++++++ grid_addr out %d\n", grid_addr);
	int32_t num_grid = *(disparity_grid + grid_addr);
	if( num_grid > 64 )
		printf("++++++++++ num_grid out %d\n", num_grid);
	int32_t* d_grid = disparity_grid + grid_addr + 1;
	// loop variables
	// NOTE(review): `val` is presumably an out-parameter filled by
	// updatePosteriorMinimumNew* -- confirm it is never read before being set.
	int32_t d_curr, u_warp, val;
	int32_t min_val = 10000;
	int32_t min_d = -1;
	// left image: warp to the right image at u - d
	if (!right_image) {
		// grid candidates outside the plane band (no prior weight)
		#pragma unroll
		for (int32_t i = 0; i<num_grid; i++) {
			d_curr = d_grid[i];
			if (d_curr<d_plane_min || d_curr>d_plane_max) {
				u_warp = u - d_curr;
				if (u_warp<window_size || u_warp >= WIDTH - window_size)
					continue;
				if(u_warp < 0 || u_warp > 320)
					printf("_+++++++++ u_wrap1 out %d\n", u_warp);
				updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d);
			}
		}
		// full plane band, weighted by the prior table when `valid`
		#pragma unroll
		for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) {
			u_warp = u - d_curr;
			if (u_warp<window_size || u_warp >= WIDTH - window_size)
				continue;
			if(u_warp < 0 || u_warp > 320)
				printf("_+++++++++ u_wrap2 out %d\n", u_warp);
			// updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
			updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
		}
	}
	// right image: warp to the left image at u + d
	else {
		#pragma unroll
		for (int32_t i = 0; i<num_grid; i++) {
			d_curr = d_grid[i];
			if (d_curr<d_plane_min || d_curr>d_plane_max) {
				u_warp = u + d_curr;
				if (u_warp<window_size || u_warp >= WIDTH - window_size)
					continue;
				if(u_warp < 0 || u_warp > 320)
					printf("_+++++++++ u_wrap3 out %d\n", u_warp);
				updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d);
			}
		}
		#pragma unroll
		for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) {
			u_warp = u + d_curr;
			if (u_warp<window_size || u_warp >= WIDTH - window_size)
				continue;
			if(u_warp < 0 || u_warp > 320)
				printf("_+++++++++ u_wrap4 out %d\n", u_warp);
			// updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
			updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d);
		}
	}
	// set disparity value: best (minimum-cost) disparity, or -1 if none.
	// NOTE(review): the author changed `min_d >= 0` to `min_d > 0`, i.e.
	// disparity 0 is deliberately treated as invalid here.
	// if (min_d >= 0){
	if (min_d > 0){
		*(D + d_addr) = (float)min_d; // MAP value (min neg-Log probability)
	}else *(D + d_addr) = -1; // invalid disparity
}
// Rasterizes each triangle's interior into tp: tp[u + v*width] = i, the index
// of the (last) triangle covering that pixel. Corners are sorted by u; the
// interior is filled column-by-column between the bounding edges, first for
// the A->B span (edge AB vs. AC), then for the B->C span (edge BC vs. AC).
// For the right image, corner u coordinates are shifted by the support
// disparity before rasterization.
// NOTE(review): the debug guard `u + v * width > 320 * 240` misses the exact
// boundary index 320*240 (valid indices end at 320*240 - 1), and the inner v
// range is never clamped to the image height -- confirm triangles are always
// fully inside the image. The `while(1);` statements are deliberate
// hang-on-error debug traps.
void computeTrianglePoints(const vector<Elas::support_pt> &p_support, const vector<Elas::triangle> &tri, \
	bool right_image, int32_t width, int32_t TRI_SIZE, int8_t* tp) {
	// loop variables
	int32_t c1, c2, c3;
	// float plane_a, plane_b, plane_c, plane_d;
	// for all triangles do
	for (uint32_t i = 0; i<TRI_SIZE; i++) {
		int num = 0;  // NOTE(review): unused (pointNum bookkeeping is commented out)
		// get plane parameters (p_i is unused -- leftover from plane fitting)
		uint32_t p_i = i * 3;
		// triangle corners (indices into p_support)
		c1 = tri[i].c1;
		c2 = tri[i].c2;
		c3 = tri[i].c3;
		// sort triangle corners wrt. u (ascending)
		float tri_u[3];
		if (!right_image) { // left image
			tri_u[0] = p_support[c1].u;
			tri_u[1] = p_support[c2].u;
			tri_u[2] = p_support[c3].u;
		}
		else { // right image: shift corners by their disparity
			tri_u[0] = p_support[c1].u - p_support[c1].d;
			tri_u[1] = p_support[c2].u - p_support[c2].d;
			tri_u[2] = p_support[c3].u - p_support[c3].d;
		}
		float tri_v[3] = { p_support[c1].v,p_support[c2].v,p_support[c3].v };
		// insertion sort of the three corners by u, keeping v paired
		for (uint32_t j = 0; j<3; j++) {
			for (uint32_t k = 0; k<j; k++) {
				if (tri_u[k]>tri_u[j]) {
					float tri_u_temp = tri_u[j]; tri_u[j] = tri_u[k]; tri_u[k] = tri_u_temp;
					float tri_v_temp = tri_v[j]; tri_v[j] = tri_v[k]; tri_v[k] = tri_v_temp;
				}
			}
		}
		// rename corners: A has the smallest u, C the largest
		float A_u = tri_u[0]; float A_v = tri_v[0];
		float B_u = tri_u[1]; float B_v = tri_v[1];
		float C_u = tri_u[2]; float C_v = tri_v[2];
		// compute straight lines (slope a, intercept b) connecting the corners;
		// vertical edges keep slope 0 and are skipped by the integer-u guards
		float AB_a = 0; float AC_a = 0; float BC_a = 0;
		if ((int32_t)(A_u) != (int32_t)(B_u)) AB_a = (A_v - B_v) / (A_u - B_u);
		if ((int32_t)(A_u) != (int32_t)(C_u)) AC_a = (A_v - C_v) / (A_u - C_u);
		if ((int32_t)(B_u) != (int32_t)(C_u)) BC_a = (B_v - C_v) / (B_u - C_u);
		float AB_b = A_v - AB_a*A_u;
		float AC_b = A_v - AC_a*A_u;
		float BC_b = B_v - BC_a*B_u;
		// first part (triangle corner A->B): fill between edges AC and AB
		if ((int32_t)(A_u) != (int32_t)(B_u)) {
			for (int32_t u = max((int32_t)A_u, 0); u < min((int32_t)B_u, width); u++) {
				if (!param.subsampling || u % 2 == 0) {
					int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b);
					int32_t v_2 = (uint32_t)(AB_a*(float)u + AB_b);
					for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++)
						if (!param.subsampling || v % 2 == 0)
						{
							// *((int16_t*)(tp + 2 * u + v * 2 * width)) = u;
							// *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v;
							// *(tp + 2 * u + v * 2 * width + 1) = i;
							if(u + v * width > 320 * 240)
							{
								printf("hhh\n");
								while(1);
							}
							*(tp + u + v * width ) = i;
							// num++;
						}
				}
			}
		}
		// second part (triangle corner B->C): fill between edges AC and BC
		if ((int32_t)(B_u) != (int32_t)(C_u)) {
			for (int32_t u = max((int32_t)B_u, 0); u < min((int32_t)C_u, width); u++) {
				if (!param.subsampling || u % 2 == 0) {
					int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b);
					int32_t v_2 = (uint32_t)(BC_a*(float)u + BC_b);
					for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++)
						if (!param.subsampling || v % 2 == 0)
						{
							// *((int16_t*)(tp + 2 * u + v * 2 * width)) = u;
							// *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v;
							// *(tp + 2 * u + v * 2 * width + 1) = i;
							if(u + v * width > 320 * 240)
							{
								printf("hhh2\n");
								while(1);
							}
							*(tp + u + v * width) = i;
							// num++;
						}
				}
			}
		}
		// tri[i].pointNum = num;
	}
}
cudaError_t err;
int32_t dims[3] = {WIDTH, HEIGH, WIDTH};
// Computes the left (D1) and right (D2) dense disparity maps and the left
// point cloud (cloud_g). Pipeline:
//   1. Rasterize per-pixel triangle ids on the host (computeTrianglePoints).
//   2. Upload each triangle's plane parameters to the device.
//   3. Run Triangle_Match1 for both views (one thread per pixel).
//   4. Left/right consistency check invalidates inconsistent D1 pixels.
//   5. Convert D1 to a 3-D point cloud.
// All device buffers except the two triangle arrays are caller-owned.
// NOTE(review): no host-to-device copy of tp1_c/tp2_c into tp1_g/tp2_g is
// visible here -- presumably the buffers alias mapped/unified memory;
// confirm at the allocation site.
// Fixes vs. original: cudaMalloc/cudaMemcpy/cudaFree are now error-checked
// with gpuErrchk (matching the rest of the file); unused locals removed;
// the triangle counts are passed to the kernels directly instead of being
// relayed through the mutated global `tri_size`.
void cuda_computeD(int32_t* disparity_grid_1, int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support,
	vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2,
	float* D1, float* D2, uint8_t* I1, uint8_t* I2, int8_t* P_g,
	int8_t *tp1_g, int8_t* tp2_g, int8_t* tp1_c, int8_t* tp2_c, float* cloud_g)
{
	const int32_t width = dims[0];
	const int32_t TRI_SIZE1 = (int32_t)tri_1.size();
	const int32_t TRI_SIZE2 = (int32_t)tri_2.size();
	// 1. per-pixel triangle ids (host side)
	computeTrianglePoints(p_support, tri_1, 0, width, TRI_SIZE1, tp1_c);
	computeTrianglePoints(p_support, tri_2, 1, width, TRI_SIZE2, tp2_c);
	// 2. upload triangle plane parameters
	Elas::triangle *tri_gpu_1 = NULL;
	Elas::triangle *tri_gpu_2 = NULL;
	gpuErrchk(cudaMalloc((void **)&tri_gpu_1, sizeof(Elas::triangle) * TRI_SIZE1));
	gpuErrchk(cudaMalloc((void **)&tri_gpu_2, sizeof(Elas::triangle) * TRI_SIZE2));
	gpuErrchk(cudaMemcpy(tri_gpu_1, &tri_1[0], sizeof(Elas::triangle) * TRI_SIZE1, cudaMemcpyHostToDevice));
	gpuErrchk(cudaMemcpy(tri_gpu_2, &tri_2[0], sizeof(Elas::triangle) * TRI_SIZE2, cudaMemcpyHostToDevice));
	// 3. one thread per pixel (block 320x1, grid 1x240)
	const int32_t plane_radius = 2; //(int32_t)max((float)ceil(param.sigma*param.sradius), (float)2.0);
	dim3 threads(320, 1);
	dim3 grid(1, 240);
	tri_size = TRI_SIZE1; // file-scope global kept up to date for other users
	Triangle_Match1 << <grid, threads, 0 >> > (tri_gpu_1, disparity_grid_1,
		I1, I2, P_g, plane_radius, 0, D1, tp1_g, TRI_SIZE1);
	tri_size = TRI_SIZE2;
	Triangle_Match1 << <grid, threads, 0 >> > (tri_gpu_2, disparity_grid_2,
		I2, I1, P_g, plane_radius, 1, D2, tp2_g, TRI_SIZE2);
	err = cudaGetLastError();
	if (cudaSuccess != err) printf("Triangle_Match1 cuda error: %s\n", cudaGetErrorString(err));
	gpuErrchk(cudaDeviceSynchronize());
	// 4. invalidate D1 pixels that disagree with D2 by more than 2 disparities
	leftRightConsistencyCheck<<<grid, threads, 0>>>(D1, D2);
	// 5. disparity -> point cloud (rows [20, 220) only, hence the 200-row grid)
	dim3 threads2(320, 1);
	dim3 grid2(1, 200);
	Convert<<<grid2, threads2>>>(D1, cloud_g);
	gpuErrchk(cudaDeviceSynchronize());
	gpuErrchk(cudaFree((void*)tri_gpu_1));
	gpuErrchk(cudaFree((void*)tri_gpu_2));
}
/****
* 5. over: cuda_computeD
***/
/***
* 6. leftRightConsistencyCheck
* */
// One-sided left/right consistency check: for each pixel (u, v), warp the
// left disparity d1 into the right image at u - d1 and compare it with the
// right disparity there. Pixels whose disparities disagree by more than 2
// (or whose warp lands outside the image, or that are already invalid) are
// set to -1 in D1_g. D2_g is read-only; only the left map is filtered.
// Launch: block (320,1), grid (1,240) -- one thread per pixel.
// Fix vs. original: removed the dead global load of D2_g[addr] and the
// unused u_warp_2 (u + d2) computation -- neither value was ever used.
__global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g)
{
	int u = blockDim.x * blockIdx.x + threadIdx.x;
	int v = blockDim.y * blockIdx.y + threadIdx.y;
	uint32_t addr = v * WIDTH + u;
	float d1 = *(D1_g + addr);
	float u_warp_1 = u - d1;
	if (d1 >= 0 && u_warp_1 >= 0 && u_warp_1 < WIDTH)
	{
		uint32_t addr_warp = v * WIDTH + (int32_t)u_warp_1;
		if (fabs(*(D2_g + addr_warp) - d1) > 2)
			*(D1_g + addr) = -1;
	}
	else
		*(D1_g + addr) = -1;
}
/***
* 6. leftRightConsistencyCheck
* */
|
91b539554c055a18151a2d3479227934169124b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_tea_leaf_ppcg_init1_kernel [6][1];
static int dims_tea_leaf_ppcg_init1_kernel_h [6][1] = {0};
//user function
// User kernel body for one 2-D grid point (stencil offset (0,0) only):
//   sd    = theta_r * z   (initial search direction scaled by theta)
//   rtemp = r             (working copy of the residual)
//   utemp = sd            (accumulated update, seeded with sd)
__device__
void tea_leaf_ppcg_init1_kernel_gpu(ACC<double> &sd,
  ACC<double> &rtemp,
  ACC<double> &utemp,
  const ACC<double> &z,
  const ACC<double> &r,
  const double *theta_r) {
  sd(0,0) = z(0,0)*(*theta_r);
  rtemp(0,0) = r(0,0);
  utemp(0,0) = sd(0,0);
}
// CUDA wrapper (auto-generated by ops.py): one thread per (idx_x, idx_y)
// grid point. Each argument pointer is advanced to this thread's element
// using the per-dat row stride stored in the __constant__ table
// dims_tea_leaf_ppcg_init1_kernel (refreshed by the host stub); threads
// outside the (size0, size1) range do nothing.
__global__ void ops_tea_leaf_ppcg_init1_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
const double arg5,
int size0,
int size1 ){
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[0][0];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[1][0];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[2][0];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[3][0];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[4][0];
  if (idx_x < size0 && idx_y < size1) {
    // wrap the raw pointers in 2-D accessors carrying the row stride
    ACC<double> argp0(dims_tea_leaf_ppcg_init1_kernel[0][0], arg0);
    ACC<double> argp1(dims_tea_leaf_ppcg_init1_kernel[1][0], arg1);
    ACC<double> argp2(dims_tea_leaf_ppcg_init1_kernel[2][0], arg2);
    const ACC<double> argp3(dims_tea_leaf_ppcg_init1_kernel[3][0], arg3);
    const ACC<double> argp4(dims_tea_leaf_ppcg_init1_kernel[4][0], arg4);
    tea_leaf_ppcg_init1_kernel_gpu(argp0, argp1, argp2, argp3,
         argp4, &arg5);
  }
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_ppcg_init1_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_tea_leaf_ppcg_init1_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,43)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(43,"tea_leaf_ppcg_init1_kernel");
OPS_kernels[43].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
if (xdim0 != dims_tea_leaf_ppcg_init1_kernel_h[0][0] || xdim1 != dims_tea_leaf_ppcg_init1_kernel_h[1][0] || xdim2 != dims_tea_leaf_ppcg_init1_kernel_h[2][0] || xdim3 != dims_tea_leaf_ppcg_init1_kernel_h[3][0] || xdim4 != dims_tea_leaf_ppcg_init1_kernel_h[4][0]) {
dims_tea_leaf_ppcg_init1_kernel_h[0][0] = xdim0;
dims_tea_leaf_ppcg_init1_kernel_h[1][0] = xdim1;
dims_tea_leaf_ppcg_init1_kernel_h[2][0] = xdim2;
dims_tea_leaf_ppcg_init1_kernel_h[3][0] = xdim3;
dims_tea_leaf_ppcg_init1_kernel_h[4][0] = xdim4;
cutilSafeCall(hipMemcpyToSymbol( dims_tea_leaf_ppcg_init1_kernel, dims_tea_leaf_ppcg_init1_kernel_h, sizeof(dims_tea_leaf_ppcg_init1_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[43].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_tea_leaf_ppcg_init1_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], *(double *)arg5.data,x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[43].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[43].mpi_time += t2-t1;
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
// Lazy-execution entry point (auto-generated by ops.py): packages the loop
// arguments into an ops_kernel_descriptor (deep-copying the scalar arg5 so
// the caller's buffer may be reused) and enqueues it; the real work happens
// later in ops_par_loop_tea_leaf_ppcg_init1_kernel_execute.
void ops_par_loop_tea_leaf_ppcg_init1_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 43;
  // hash combines the kernel index, range, and dat indices (djb2-style)
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 43;
  for ( int i=0; i<4; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 6;
  desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  char *tmp = (char*)malloc(1*sizeof(double));
  memcpy(tmp, arg5.data,1*sizeof(double));
  desc->args[5].data = tmp;
  desc->function = ops_par_loop_tea_leaf_ppcg_init1_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(43,"tea_leaf_ppcg_init1_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
| 91b539554c055a18151a2d3479227934169124b8.cu | //
// auto-generated by ops.py
//
__constant__ int dims_tea_leaf_ppcg_init1_kernel [6][1];
static int dims_tea_leaf_ppcg_init1_kernel_h [6][1] = {0};
//user function
// User kernel body for one 2-D grid point (stencil offset (0,0) only):
//   sd    = theta_r * z   (initial search direction scaled by theta)
//   rtemp = r             (working copy of the residual)
//   utemp = sd            (accumulated update, seeded with sd)
__device__
void tea_leaf_ppcg_init1_kernel_gpu(ACC<double> &sd,
  ACC<double> &rtemp,
  ACC<double> &utemp,
  const ACC<double> &z,
  const ACC<double> &r,
  const double *theta_r) {
  sd(0,0) = z(0,0)*(*theta_r);
  rtemp(0,0) = r(0,0);
  utemp(0,0) = sd(0,0);
}
// CUDA wrapper (auto-generated by ops.py): one thread per (idx_x, idx_y)
// grid point. Each argument pointer is advanced to this thread's element
// using the per-dat row stride stored in the __constant__ table
// dims_tea_leaf_ppcg_init1_kernel (refreshed by the host stub); threads
// outside the (size0, size1) range do nothing.
__global__ void ops_tea_leaf_ppcg_init1_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
const double arg5,
int size0,
int size1 ){
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[0][0];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[1][0];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[2][0];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[3][0];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init1_kernel[4][0];
  if (idx_x < size0 && idx_y < size1) {
    // wrap the raw pointers in 2-D accessors carrying the row stride
    ACC<double> argp0(dims_tea_leaf_ppcg_init1_kernel[0][0], arg0);
    ACC<double> argp1(dims_tea_leaf_ppcg_init1_kernel[1][0], arg1);
    ACC<double> argp2(dims_tea_leaf_ppcg_init1_kernel[2][0], arg2);
    const ACC<double> argp3(dims_tea_leaf_ppcg_init1_kernel[3][0], arg3);
    const ACC<double> argp4(dims_tea_leaf_ppcg_init1_kernel[4][0], arg4);
    tea_leaf_ppcg_init1_kernel_gpu(argp0, argp1, argp2, argp3,
         argp4, &arg5);
  }
}
// Host stub (auto-generated by ops.py -- do not edit by hand): computes the
// locally-owned iteration range, refreshes the __constant__ stride table when
// dat sizes change, derives the device base pointer for each argument,
// performs halo exchanges, launches the kernel, and records timing/transfer
// statistics. Compiled either as the public entry point (eager mode) or as
// the deferred executor ops_par_loop_..._execute (OPS_LAZY).
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_ppcg_init1_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_tea_leaf_ppcg_init1_kernel_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,6,range,43)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(43,"tea_leaf_ppcg_init1_kernel");
    OPS_kernels[43].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI
  #ifdef OPS_MPI
  int arg_idx[2];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  // refresh the __constant__ stride table only when a dat's x-size changed
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  if (xdim0 != dims_tea_leaf_ppcg_init1_kernel_h[0][0] || xdim1 != dims_tea_leaf_ppcg_init1_kernel_h[1][0] || xdim2 != dims_tea_leaf_ppcg_init1_kernel_h[2][0] || xdim3 != dims_tea_leaf_ppcg_init1_kernel_h[3][0] || xdim4 != dims_tea_leaf_ppcg_init1_kernel_h[4][0]) {
    dims_tea_leaf_ppcg_init1_kernel_h[0][0] = xdim0;
    dims_tea_leaf_ppcg_init1_kernel_h[1][0] = xdim1;
    dims_tea_leaf_ppcg_init1_kernel_h[2][0] = xdim2;
    dims_tea_leaf_ppcg_init1_kernel_h[3][0] = xdim3;
    dims_tea_leaf_ppcg_init1_kernel_h[4][0] = xdim4;
    cutilSafeCall(cudaMemcpyToSymbol( dims_tea_leaf_ppcg_init1_kernel, dims_tea_leaf_ppcg_init1_kernel_h, sizeof(dims_tea_leaf_ppcg_init1_kernel)));
  }
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  char *p_a[6];
  //set up initial pointers: base offset plus the start of the local range
  int base0 = args[0].dat->base_offset +
       dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
       dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
       dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
       dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
       dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  p_a[4] = (char *)args[4].data_d + base4;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 6);
  ops_halo_exchanges(args,6,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[43].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0)
    ops_tea_leaf_ppcg_init1_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
       (double *)p_a[2], (double *)p_a[3],
       (double *)p_a[4], *(double *)arg5.data,x_size, y_size);
  cutilSafeCall(cudaGetLastError());
  if (OPS_diags>1) {
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[43].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 6);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  ops_set_halo_dirtybit3(&args[2],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[43].mpi_time += t2-t1;
    OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg4);
  }
}
#ifdef OPS_LAZY
// Lazy-execution entry point (auto-generated by ops.py): packages the loop
// arguments into an ops_kernel_descriptor (deep-copying the scalar arg5 so
// the caller's buffer may be reused) and enqueues it; the real work happens
// later in ops_par_loop_tea_leaf_ppcg_init1_kernel_execute.
void ops_par_loop_tea_leaf_ppcg_init1_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 43;
  // hash combines the kernel index, range, and dat indices (djb2-style)
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 43;
  for ( int i=0; i<4; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 6;
  desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  char *tmp = (char*)malloc(1*sizeof(double));
  memcpy(tmp, arg5.data,1*sizeof(double));
  desc->args[5].data = tmp;
  desc->function = ops_par_loop_tea_leaf_ppcg_init1_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(43,"tea_leaf_ppcg_init1_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
9cd7ad817927e7e793c47e38f37bfac33a307f36.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef USE_CUDNN
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/cudnn_batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass via cuDNN batch normalization.
//   TRAIN: cudnnBatchNormalizationForwardTraining updates the running
//     mean/variance blobs (blobs_[0]/blobs_[1], moving-average factor
//     1 - moving_average_fraction_) and saves the per-batch mean and
//     inverse variance for the backward pass.
//   TEST (or use_global_stats_): ForwardInference uses the stored statistics.
// When scale/bias learning is disabled, constant ones/zeros are substituted.
// In-place operation (top == bottom) is routed through private_top_ /
// private_bottom_ so the un-normalized input survives for Backward_gpu.
// NOTE(review): the inference call hard-codes CUDNN_BATCHNORM_SPATIAL while
// the training call uses mode_ -- confirm mode_ is always SPATIAL here.
template<typename Dtype>
void CuDNNBatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype *bottom_data = bottom[0]->gpu_data();
  // in-place: normalize into a private buffer first, copy back afterwards
  Dtype* top_data = top[0] == bottom[0] ?
      private_top_.mutable_gpu_data() : top[0]->mutable_gpu_data();
  const Dtype *scale_data;
  const Dtype *bias_data;
  Dtype *global_mean;
  Dtype *global_var;
  Dtype *save_mean;
  Dtype *save_inv_var;
  if (this->phase_ == TRAIN) {
    global_mean = this->blobs_[0]->mutable_gpu_data();
    global_var = this->blobs_[1]->mutable_gpu_data();
    // per-batch statistics saved for the backward pass
    save_mean = save_mean_.mutable_gpu_data();
    save_inv_var = save_inv_var_.mutable_gpu_data();
  }
  else {
    global_mean = this->blobs_[0]->mutable_gpu_data();
    global_var = this->blobs_[1]->mutable_gpu_data();
  }
  if (this->scale_bias_) {
    scale_data = this->blobs_[3]->gpu_data();
    bias_data = this->blobs_[4]->gpu_data();
  }
  else {
    // identity scale (1) and bias (0) when scale/bias are not learned
    scale_data = scale_ones_.gpu_data();
    bias_data = bias_zeros_.gpu_data();
  }
  if (this->phase_ == TEST || this->use_global_stats_) {
    CUDNN_CHECK(cudnnBatchNormalizationForwardInference(handle_,
        CUDNN_BATCHNORM_SPATIAL,
        cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
        bottom_desc_, bottom_data, top_desc_, top_data,
        scale_bias_mean_var_desc_, scale_data, bias_data,
        global_mean, global_var, CUDNN_BN_MIN_EPSILON));
  }
  else if (this->phase_ == TRAIN) {
    // exponential-average factor applied to the running statistics
    Dtype factor = 1. - this->moving_average_fraction_;
    CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(handle_, mode_,
        cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
        bottom_desc_, bottom_data, top_desc_, top_data,
        scale_bias_mean_var_desc_, scale_data, bias_data,
        factor, global_mean, global_var, CUDNN_BN_MIN_EPSILON, save_mean, save_inv_var));
  }
  else {
    LOG(FATAL) << "Unknown phase";
  }
  if (top[0] == bottom[0]) {
    // preserve the original input for Backward, publish the private result
    private_bottom_.CopyFrom(*bottom[0]);
    top[0]->CopyFrom(private_top_);
  }
}
// Backward pass via cuDNN: computes bottom_diff and (when scale/bias are
// learned) accumulates scale_diff / bias_diff, using the per-batch mean and
// inverse variance saved by Forward_gpu in TRAIN mode.
// In-place operation recovers the un-normalized input from private_bottom_
// and stages the incoming top diff through private_top_.
// NOTE(review): bottom_desc_ is reused as the descriptor for the top diff --
// valid only because top and bottom share the same shape here.
template <typename Dtype>
void CuDNNBatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype *top_diff = top[0]->gpu_diff();
  Dtype * bottom_diff = bottom[0]->mutable_gpu_diff();
  const Dtype* bottom_data = top[0] == bottom[0] ?
      private_bottom_.gpu_data() : bottom[0]->gpu_data();
  const Dtype* save_mean;
  const Dtype* save_inv_var;
  const Dtype* scale_data;
  Dtype* scale_diff;
  Dtype* bias_diff;
  save_mean = save_mean_.gpu_data();
  save_inv_var = save_inv_var_.gpu_data();
  if (this->scale_bias_) {
    scale_data = this->blobs_[3]->gpu_data();
    scale_diff = this->blobs_[3]->mutable_gpu_diff();
    bias_diff = this->blobs_[4]->mutable_gpu_diff();
  }
  else {
    // dummy buffers: gradients are computed but discarded
    scale_data = scale_ones_.gpu_data();
    scale_diff = scale_ones_.mutable_gpu_diff();
    bias_diff = bias_zeros_.mutable_gpu_diff();
  }
  if (top[0] == bottom[0]) {
    // copy diff from top to private_top (diff-only copy)
    private_top_.CopyFrom(*top[0], true);
    top_diff = private_top_.gpu_diff();
  }
  CUDNN_CHECK(cudnnBatchNormalizationBackward(handle_, mode_,
      cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
      cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
      bottom_desc_, bottom_data, bottom_desc_, top_diff, bottom_desc_, bottom_diff,
      scale_bias_mean_var_desc_, scale_data, scale_diff, bias_diff,
      CUDNN_BN_MIN_EPSILON, save_mean, save_inv_var));
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNBatchNormLayer);
}
#endif | 9cd7ad817927e7e793c47e38f37bfac33a307f36.cu | #ifdef USE_CUDNN
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/cudnn_batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
// Forward pass of cuDNN batch normalization.
// TRAIN: cudnnBatchNormalizationForwardTraining normalizes with batch
//   statistics, caches them in save_mean_/save_inv_var_ for Backward_gpu,
//   and folds them into the running mean/var (blobs_[0]/blobs_[1]) with
//   exponential-average factor (1 - moving_average_fraction_).
// TEST / use_global_stats_: cudnnBatchNormalizationForwardInference
//   normalizes with the stored running statistics.
// Supports in-place operation (top[0] == bottom[0]) via private_top_ /
// private_bottom_ staging buffers.
void CuDNNBatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype *bottom_data = bottom[0]->gpu_data();
// In-place: write into private_top_ first so the untouched input can be
// preserved for the backward pass (see the CopyFrom calls at the end).
Dtype* top_data = top[0] == bottom[0] ?
private_top_.mutable_gpu_data() : top[0]->mutable_gpu_data();
//Dtype *top_data = top[0]->mutable_gpu_data();
const Dtype *scale_data;
const Dtype *bias_data;
Dtype *global_mean;
Dtype *global_var;
Dtype *save_mean;
Dtype *save_inv_var;
//printf("######################################## before cudnn batch\r\n");
if (this->phase_ == TRAIN) {
global_mean = this->blobs_[0]->mutable_gpu_data();
global_var = this->blobs_[1]->mutable_gpu_data();
save_mean = save_mean_.mutable_gpu_data();
save_inv_var = save_inv_var_.mutable_gpu_data();
}
else {
// Non-TRAIN: save_mean/save_inv_var stay unset — only the TRAIN branch
// below reads them.
global_mean = this->blobs_[0]->mutable_gpu_data();
global_var = this->blobs_[1]->mutable_gpu_data();
}
if (this->scale_bias_) {
// Learned per-channel scale (blobs_[3]) and bias (blobs_[4]).
scale_data = this->blobs_[3]->gpu_data();
bias_data = this->blobs_[4]->gpu_data();
}
else {
// No learned affine transform: identity scale, zero bias.
scale_data = scale_ones_.gpu_data();
bias_data = bias_zeros_.gpu_data();
}
if (this->phase_ == TEST || this->use_global_stats_) {
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(handle_,
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
bottom_desc_, bottom_data, top_desc_, top_data,
scale_bias_mean_var_desc_, scale_data, bias_data,
global_mean, global_var, CUDNN_BN_MIN_EPSILON));
}
else if (this->phase_ == TRAIN) {
// cuDNN's exponentialAverageFactor weights the NEW batch statistics.
Dtype factor = 1. - this->moving_average_fraction_;
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(handle_, mode_,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
bottom_desc_, bottom_data, top_desc_, top_data,
scale_bias_mean_var_desc_, scale_data, bias_data,
factor, global_mean, global_var, CUDNN_BN_MIN_EPSILON, save_mean, save_inv_var));
}
else {
LOG(FATAL) << "Unknown phase";
}
//CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
if (top[0] == bottom[0]) {
// In-place: snapshot the (still unmodified) input for Backward_gpu, then
// publish the staged result into top[0].
private_bottom_.CopyFrom(*bottom[0]);
top[0]->CopyFrom(private_top_);
}
//printf("######################################## after cudnn batch\r\n");
}
template <typename Dtype>
// Backward pass: computes the bottom gradient (and, when scale_bias_ is set,
// the scale/bias gradients) with cudnnBatchNormalizationBackward, reusing the
// per-batch mean and inverse variance cached by the training-mode forward
// pass in save_mean_ / save_inv_var_.
// NOTE(review): assumes Forward_gpu ran in TRAIN phase so save_mean_ /
// save_inv_var_ hold data for this batch — TODO confirm for TEST-phase use.
void CuDNNBatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype *top_diff = top[0]->gpu_diff();
Dtype * bottom_diff = bottom[0]->mutable_gpu_diff();
// In-place case: the forward pass stashed the original input in
// private_bottom_, so read the pre-normalization activations from there.
const Dtype* bottom_data = top[0] == bottom[0] ?
private_bottom_.gpu_data() : bottom[0]->gpu_data();
//const Dtype *bottom_data = bottom[0]->gpu_data();
//double epsilon = this->eps_;
const Dtype* save_mean;
const Dtype* save_inv_var;
const Dtype* scale_data;
Dtype* scale_diff;
Dtype* bias_diff;
save_mean = save_mean_.gpu_data();
save_inv_var = save_inv_var_.gpu_data();
if (this->scale_bias_) {
// Learned scale (blobs_[3]) and bias (blobs_[4]) — accumulate their grads.
scale_data = this->blobs_[3]->gpu_data();
scale_diff = this->blobs_[3]->mutable_gpu_diff();
bias_diff = this->blobs_[4]->mutable_gpu_diff();
}
else {
// No learned scale/bias: use the constant ones/zeros blobs. cuDNN still
// needs writable diff buffers, so their (unused) diffs act as scratch.
scale_data = scale_ones_.gpu_data();
scale_diff = scale_ones_.mutable_gpu_diff();
bias_diff = bias_zeros_.mutable_gpu_diff();
}
if (top[0] == bottom[0]) {
// copy diff from top to private_top
private_top_.CopyFrom(*top[0], true);
top_diff = private_top_.gpu_diff();
}
// alpha/beta pairs: overwrite bottom_diff (beta = zero), but accumulate
// into scale/bias diffs (beta = one) so multi-top gradients sum correctly.
CUDNN_CHECK(cudnnBatchNormalizationBackward(handle_, mode_,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
bottom_desc_, bottom_data, bottom_desc_, top_diff, bottom_desc_, bottom_diff,
scale_bias_mean_var_desc_, scale_data, scale_diff, bias_diff,
CUDNN_BN_MIN_EPSILON, save_mean, save_inv_var));
/*CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(0)));*/
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNBatchNormLayer);
}
#endif |
2cbdede911308a95b97d276f1aa0cc3cdba13fcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Block-local tree reduction (neighbored pairs, interleaved addressing):
// each block sums its blockDim.x-sized slice of g_idata IN PLACE and writes
// the partial sum to g_odata[blockIdx.x]. Host must reduce the per-block
// partials afterwards.
//
// Fix vs. original: out-of-range threads used to `return` before the loop,
// so the in-loop __syncthreads() was no longer reached by every thread of
// the tail block (undefined behavior / possible hang), and in-range threads
// could read idata[tid + stride] past element n. Guarding the add instead of
// returning keeps all threads alive through every barrier; behavior for
// fully in-range blocks is unchanged.
__global__ void warmingUp(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int *idata = g_idata + blockIdx.x * blockDim.x;  // this block's slice
    for(int stride = 1; stride < blockDim.x; stride <<= 1){
        // Only threads whose own element AND partner element are in range add.
        if(idx < n && tid % (2 * stride) == 0 && idx + stride < n)
            idata[tid] += idata[tid + stride];
        __syncthreads();  // barrier must be reached by ALL threads of the block
    }
    // Thread 0 publishes the block's partial sum (skip fully out-of-range blocks).
    if(tid == 0 && idx < n) g_odata[blockIdx.x] = idata[0];
}
__global__ void warmingUp(int *g_idata, int *g_odata, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
if(idx >= n) return ;
for(int stride = 1; stride < blockDim.x; stride <<= 1){
if(tid % (2 * stride) == 0)
idata[tid] += idata[tid + stride];
__syncthreads();
}
if(tid == 0) g_odata[blockIdx.x] = idata[0];
} |
787628358590d4c9115d487305f34e5678ab6e55.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_structs.h"
//Arithmetic in projective coordinates (Jacobian coordinates should be faster and we are going to check it!)
//TODO: we may also use BN specific optimizations (for example use, that a = 0)
//------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
// Point doubling in standard projective coordinates:
//   W  = a*Z^2 + 3*X^2       (a == 0 shortcut under BN256_SPECIFIC_OPTIMIZATION)
//   S  = Y*Z,  B = X*Y*S,  H = W^2 - 8*B
//   X' = 2*H*S
//   Y' = W*(4*B - H) - 8*Y^2*S^2
//   Z' = 8*S^3
// All field elements are in Montgomery form; BASE_FIELD_Rk denote the
// Montgomery representations of the small constants k.
// Change vs. original: removed a dead `res.x = W;` store that was
// unconditionally overwritten by `res.x = MONT_MUL(temp, S);` below.
DEVICE_FUNC ec_point ECC_DOUBLE_PROJ(const ec_point& pt)
{
    // 2*P is the point at infinity when Y == 0 (order-2 point) or P == infinity.
    if (is_zero(pt.y) || is_infinity(pt))
        return point_at_infty();
    else
    {
        uint256_g temp, temp2;
        uint256_g W, S, B, H, S2;
        ec_point res;
#ifdef BN256_SPECIFIC_OPTIMIZATION
        // BN curves have a == 0, so W collapses to 3*X^2.
        temp = MONT_SQUARE(pt.x);
        W = MONT_MUL(temp, R3_g);
#else
        temp = MONT_SQUARE(pt.x);
        temp = MONT_MUL(temp, BASE_FIELD_R3);
        temp2 = MONT_SQUARE(pt.z);
        temp2 = MONT_MUL(temp2, CURVE_A_COEFF);
        W = FIELD_ADD(temp, temp2);
#endif
        S = MONT_MUL(pt.y, pt.z);
        temp = MONT_MUL(pt.x, pt.y);
        B = MONT_MUL(temp, S);
        // H = W^2 - 8*B
        temp = MONT_SQUARE(W);
        temp2 = MONT_MUL(BASE_FIELD_R8, B);
        H = FIELD_SUB(temp, temp2);
        // X' = 2*H*S
        temp = MONT_MUL(BASE_FIELD_R2, H);
        res.x = MONT_MUL(temp, S);
        // Y' = W*(4*B - H) - 8*Y^2*S^2
        S2 = MONT_SQUARE(S);
        temp = MONT_MUL(BASE_FIELD_R4, B);
        temp = FIELD_SUB(temp, H);
        temp = MONT_MUL(W, temp);
        temp2 = MONT_SQUARE(pt.y);
        temp2 = MONT_MUL(BASE_FIELD_R8, temp2);
        temp2 = MONT_MUL(temp2, S2);
        res.y = FIELD_SUB(temp, temp2);
        // Z' = 8*S^3
        temp = MONT_MUL(BASE_FIELD_R8, S);
        res.z = MONT_MUL(temp, S2);
        return res;
    }
}
//for debug purposes only: check if point is indeed on curve
//for debug purposes only: check if point is indeed on curve
// Verifies the projective Weierstrass equation
//   y^2 * z == x^3 + A * x * z^2 + B * z^3
// in Montgomery arithmetic. Note: for z == 0 both sides reduce to comparing
// x^3 with 0, so only (0 : y : 0)-style infinity encodings pass.
DEVICE_FUNC bool IS_ON_CURVE_PROJ(const ec_point& pt)
{
//y^{2} * z = x^{3} + A *x * z^{2} + B * z^{3}
uint256_g temp1, temp2, z2;
z2 = MONT_SQUARE(pt.z);
// temp1 accumulates the right-hand side, starting with x^3.
temp1 = MONT_SQUARE(pt.x);
temp1 = MONT_MUL(temp1, pt.x);
// + A * x * z^2
temp2 = MONT_MUL(CURVE_A_COEFF, pt.x);
temp2 = MONT_MUL(temp2, z2);
temp1 = FIELD_ADD(temp1, temp2);
// + B * z^3
temp2 = MONT_MUL(CURVE_B_COEFF, pt.z);
temp2 = MONT_MUL(temp2, z2);
temp1 = FIELD_ADD(temp1, temp2);
// Left-hand side: y^2 * z.
temp2 = MONT_SQUARE(pt.y);
temp2 = MONT_MUL(temp2, pt.z);
return EQUAL(temp1, temp2);
}
// Projective equality: (X1 : Y1 : Z1) == (X2 : Y2 : Z2) as curve points.
// The code checks the three cross-ratio identities below (the original
// header comment mis-stated the second one as X_1 * Z_2 = X_2 * Y_1):
//   X_1 * Y_2 == X_2 * Y_1
//   Y_1 * Z_2 == Y_2 * Z_1
//   X_1 * Z_2 == X_2 * Z_1
// The first identity is implied by the other two for Z != 0, but checking
// all three also handles degenerate encodings.
DEVICE_FUNC bool EQUAL_PROJ(const ec_point& pt1, const ec_point& pt2)
{
//check all of the following equations:
//X_1 * Y_2 = X_2 * Y_1;
//Y_1 * Z_2 = Y_2 * Z_1;
//X_1 * Z_2 = X_2 * Z_1;
uint256_g temp1, temp2;
temp1 = MONT_MUL(pt1.x, pt2.y);
temp2 = MONT_MUL(pt1.y, pt2.x);
bool first_check = EQUAL(temp1, temp2);
temp1 = MONT_MUL(pt1.y, pt2.z);
temp2 = MONT_MUL(pt1.z, pt2.y);
bool second_check = EQUAL(temp1, temp2);
temp1 = MONT_MUL(pt1.x, pt2.z);
temp2 = MONT_MUL(pt1.z, pt2.x);
bool third_check = EQUAL(temp1, temp2);
return (first_check && second_check && third_check);
}
DEVICE_FUNC ec_point ECC_ADD_PROJ(const ec_point& left, const ec_point& right)
{
if (is_infinity(left))
return right;
if (is_infinity(right))
return left;
uint256_g U1, U2, V1, V2;
U1 = MONT_MUL(left.z, right.y);
U2 = MONT_MUL(left.y, right.z);
V1 = MONT_MUL(left.z, right.x);
V2 = MONT_MUL(left.x, right.z);
ec_point res;
if (EQUAL(V1, V2))
{
if (!EQUAL(U1, U2))
return point_at_infty();
else
return ECC_DOUBLE_PROJ(left);
}
uint256_g U = FIELD_SUB(U1, U2);
uint256_g V = FIELD_SUB(V1, V2);
uint256_g W = MONT_MUL(left.z, right.z);
uint256_g Vsq = MONT_SQUARE(V);
uint256_g Vcube = MONT_MUL(Vsq, V);
uint256_g temp1, temp2;
temp1 = MONT_SQUARE(U);
temp1 = MONT_MUL(temp1, W);
temp1 = FIELD_SUB(temp1, Vcube);
temp2 = MONT_MUL(BASE_FIELD_R2, Vsq);
temp2 = MONT_MUL(temp2, V2);
uint256_g A = FIELD_SUB(temp1, temp2);
res.x = MONT_MUL(V, A);
temp1 = MONT_MUL(Vsq, V2);
temp1 = FIELD_SUB(temp1, A);
temp1 = MONT_MUL(U, temp1);
temp2 = MONT_MUL(Vcube, U2);
res.y = FIELD_SUB(temp1, temp2);
res.z = MONT_MUL(Vcube, W);
return res;
}
// Projective subtraction: left - right, implemented as left + (-right)
// where INV negates the point.
DEVICE_FUNC ec_point ECC_SUB_PROJ(const ec_point& left, const ec_point& right)
{
return ECC_ADD_PROJ(left, INV(right));
}
DEVICE_FUNC ec_point ECC_ADD_MIXED_PROJ(const ec_point& left, const affine_point& right)
{
if (is_infinity(left))
return ec_point{right.x, right.y, BASE_FIELD_R};
uint256_g U1, V1;
U1 = MONT_MUL(left.z, right.y);
V1 = MONT_MUL(left.z, right.x);
ec_point res;
if (EQUAL(V1, left.x))
{
if (!EQUAL(U1, left.y))
return point_at_infty();
else
return ECC_DOUBLE_PROJ(left);
}
uint256_g U = FIELD_SUB(U1, left.y);
uint256_g V = FIELD_SUB(V1, left.x);
uint256_g Vsq = MONT_SQUARE(V);
uint256_g Vcube = MONT_MUL(Vsq, V);
uint256_g temp1, temp2;
temp1 = MONT_SQUARE(U);
temp1 = MONT_MUL(temp1, left.z);
temp1 = FIELD_SUB(temp1, Vcube);
temp2 = MONT_MUL(BASE_FIELD_R2, Vsq);
temp2 = MONT_MUL(temp2, left.x);
uint256_g A = FIELD_SUB(temp1, temp2);
res.x = MONT_MUL(V, A);
temp1 = MONT_MUL(Vsq, left.x);
temp1 = FIELD_SUB(temp1, A);
temp1 = MONT_MUL(U, temp1);
temp2 = MONT_MUL(Vcube, left.y);
res.y = FIELD_SUB(temp1, temp2);
res.z = MONT_MUL(Vcube, left.z);
return res;
}
// Arithmetic in Jacobian coordinates (Jacobian coordinates should be faster and we are going to check it!)
// TODO: we may also use BN specific optimizations (for example use, that a = 0)
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------------------------------------------------------------
//TODO: An alternative repeated doubling routine with costs (4m)M + (4m+2)S for any value a can be derived from the Modified Jacobian doubling routine.
// For small values a (say 0 or -3) the costs reduce to (4m-1)M + (4m+2)S, competing nicely with the algorithm showed above.
DEVICE_FUNC ec_point ECC_DOUBLE_JAC(const ec_point& pt)
{
if (is_zero(pt.y) || is_infinity(pt))
return point_at_infty();
else
{
uint256_g temp1, temp2;
temp1 = MONT_MUL(BASE_FIELD_R4, pt.x);
uint256_g Ysq = MONT_SQUARE(pt.y);
uint256_g S = MONT_MUL(temp1, Ysq);
//TODO: here we may also use BN-SPECIFIC optimizations, cause A = 0
temp1 = MONT_SQUARE(pt.x);
temp1 = MONT_MUL(BASE_FIELD_R3, temp1);
temp2 = MONT_SQUARE(pt.z);
temp2 = MONT_SQUARE(temp2);
temp2 = MONT_MUL(temp2, CURVE_A_COEFF);
uint256_g M = FIELD_ADD(temp1, temp2);
temp1 = MONT_SQUARE(M);
temp2 = MONT_MUL(BASE_FIELD_R2, S);
uint256_g res_x = FIELD_SUB(temp1, temp2);
temp1 = FIELD_SUB(S, res_x);
temp1 = MONT_MUL(M, temp1);
temp2 = MONT_SQUARE(Ysq);
temp2 = MONT_MUL(BASE_FIELD_R8, temp2);
uint256_g res_y = FIELD_SUB(temp1, temp2);
temp1 = MONT_MUL(BASE_FIELD_R2, pt.y);
uint256_g res_z = MONT_MUL(temp1, pt.z);
return ec_point{res_x, res_y, res_z};
}
}
DEVICE_FUNC bool IS_ON_CURVE_JAC(const ec_point& pt)
{
//y^4 = x^3 + a x z^4 +b z^6
uint256_g temp1 = MONT_SQUARE(pt.y);
uint256_g lefthandside = MONT_SQUARE(temp1);
uint256_g Zsq = MONT_SQUARE(pt.z);
uint256_g Z4 = MONT_SQUARE(Zsq);
temp1 = MONT_SQUARE(pt.x);
uint256_g righthandside = MONT_MUL(temp1, pt.x);
temp1 = MONT_MUL(CURVE_A_COEFF, pt.x);
temp1 = MONT_MUL(temp1, Z4);
righthandside = FIELD_ADD(righthandside, temp1);
temp1 = MONT_MUL(CURVE_B_COEFF, Zsq);
temp1 = MONT_MUL(temp1, Z4);
righthandside = FIELD_ADD(righthandside, temp1);
return EQUAL(lefthandside, righthandside);
}
// Jacobian-coordinate equality: (X1, Y1, Z1) == (X2, Y2, Z2) iff
//   X1 * Z2^2 == X2 * Z1^2   and   Y1 * Z2^3 == Y2 * Z1^3.
// Fix vs. original: the Y comparison multiplied the second operand by pt2.z,
// effectively comparing Y1*Z2^3 against Y2*Z1^2*Z2; it must be scaled by
// pt1.z to form Y2*Z1^3.
DEVICE_FUNC bool EQUAL_JAC(const ec_point& pt1, const ec_point& pt2)
{
    // Exactly one point at infinity -> unequal; both at infinity -> equal.
    if (is_infinity(pt1) ^ is_infinity(pt2))
        return false;
    if (is_infinity(pt1) & is_infinity(pt2))
        return true;
    // Now both points are affine-representable (Z != 0).
    uint256_g Z1sq = MONT_SQUARE(pt1.z);
    uint256_g Z2sq = MONT_SQUARE(pt2.z);
    // X1 * Z2^2 vs X2 * Z1^2
    uint256_g temp1 = MONT_MUL(pt1.x, Z2sq);
    uint256_g temp2 = MONT_MUL(pt2.x, Z1sq);
    bool first_check = EQUAL(temp1, temp2);
    // Y1 * Z2^3 vs Y2 * Z1^3
    temp1 = MONT_MUL(pt1.y, Z2sq);
    temp1 = MONT_MUL(temp1, pt2.z);
    temp2 = MONT_MUL(pt2.y, Z1sq);
    temp2 = MONT_MUL(temp2, pt1.z);  // was pt2.z (bug)
    bool second_check = EQUAL(temp1, temp2);
    return (first_check && second_check);
}
DEVICE_FUNC ec_point ECC_ADD_JAC(const ec_point& left, const ec_point& right)
{
if (is_infinity(left))
return right;
if (is_infinity(right))
return left;
uint256_g U1, U2;
uint256_g Z2sq = MONT_SQUARE(right.z);
U1 = MONT_MUL(left.x, Z2sq);
uint256_g Z1sq = MONT_SQUARE(left.z);
U2 = MONT_MUL(right.x, Z1sq);
uint256_g S1 = MONT_MUL(left.y, Z2sq);
S1 = MONT_MUL(S1, right.z);
uint256_g S2 = MONT_MUL(right.y, Z1sq);
S2 = MONT_MUL(S2, left.z);
if (EQUAL(U1, U2))
{
if (!EQUAL(S1, S2))
return point_at_infty();
else
return ECC_DOUBLE_JAC(left);
}
uint256_g H = FIELD_SUB(U2, U1);
uint256_g R = FIELD_SUB(S2, S1);
uint256_g Hsq = MONT_SQUARE(H);
uint256_g Hcube = MONT_MUL(Hsq, H);
uint256_g T = MONT_MUL(U1, Hsq);
uint256_g res_x = MONT_SQUARE(R);
res_x = FIELD_SUB(res_x, Hcube);
uint256_g temp = MONT_MUL(BASE_FIELD_R2, T);
res_x = FIELD_SUB(res_x, temp);
uint256_g res_y = FIELD_SUB(T, res_x);
res_y = MONT_MUL(R, res_y);
temp = MONT_MUL(S1, Hcube);
res_y = FIELD_SUB(res_y, temp);
uint256_g res_z = MONT_MUL(H, left.z);
res_z = MONT_MUL(res_z, right.z);
return ec_point{res_x, res_y, res_z};
}
// Jacobian subtraction: left - right, implemented as left + (-right)
// where INV negates the point.
DEVICE_FUNC ec_point ECC_SUB_JAC(const ec_point& left, const ec_point& right)
{
return ECC_ADD_JAC(left, INV(right));
}
DEVICE_FUNC ec_point ECC_ADD_MIXED_JAC(const ec_point& left, const affine_point& right)
{
if (is_infinity(left))
return ec_point{right.x, right.y, BASE_FIELD_R};
uint256_g U2;
uint256_g Z1sq = MONT_SQUARE(left.z);
U2 = MONT_MUL(right.x, Z1sq);
uint256_g S2 = MONT_MUL(right.y, Z1sq);
S2 = MONT_MUL(S2, left.z);
if (EQUAL(left.x, U2))
{
if (!EQUAL(left.y, S2))
return point_at_infty();
else
return ECC_DOUBLE_JAC(left);
}
uint256_g H = FIELD_SUB(U2, left.x);
uint256_g R = FIELD_SUB(S2, left.y);
uint256_g Hsq = MONT_SQUARE(H);
uint256_g Hcube = MONT_MUL(Hsq, H);
uint256_g T = MONT_MUL(left.x, Hsq);
uint256_g res_x = MONT_SQUARE(R);
res_x = FIELD_SUB(res_x, Hcube);
uint256_g temp = MONT_MUL(BASE_FIELD_R2, T);
res_x = FIELD_SUB(res_x, temp);
uint256_g res_y = FIELD_SUB(T, res_x);
res_y = MONT_MUL(R, res_y);
temp = MONT_MUL(left.y, Hcube);
res_y = FIELD_SUB(res_y, temp);
uint256_g res_z = MONT_MUL(H, left.z);
return ec_point{res_x, res_y, res_z};
}
//TODO: what about repeated doubling (m-fold doubling) for Jacobian coordinates?
//random number generators
//---------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
// Montgomery-form modular exponentiation: elem ^ power.
// Right-to-left binary square-and-multiply over the N_BITLEN bits of
// `power`; the accumulator starts at BASE_FIELD_R, the Montgomery
// representation of 1.
static DEVICE_FUNC inline uint256_g field_exp(const uint256_g& elem, const uint256_g& power)
{
    uint256_g result = BASE_FIELD_R;  // Montgomery one
    uint256_g base = elem;            // repeatedly squared: elem^(2^bit)
    size_t bit = 0;
    while (bit < N_BITLEN)
    {
        if (get_bit(power, bit))
            result = MONT_MUL(result, base);
        base = MONT_SQUARE(base);
        ++bit;
    }
    return result;
}
//The following algorithm is taken from 1st edition of
//Jeffrey Hoffstein, Jill Pipher, J.H. Silverman - An introduction to mathematical cryptography
//Proposition 2.27 on page 84
// Modular square root attempt: computes candidate = x ^ MAGIC_CONSTANT and
// returns it only if it actually squares back to x; otherwise x is a
// non-residue and NONE_OPT is returned.
// NOTE(review): presumably MAGIC_CONSTANT == (p+1)/4 for p ≡ 3 (mod 4)
// (Hoffstein–Pipher–Silverman, Prop. 2.27) — confirm against its definition.
static DEVICE_FUNC inline optional<uint256_g> field_square_root(const uint256_g& x)
{
uint256_g candidate = field_exp(x, MAGIC_CONSTANT);
using X = optional<uint256_g>;
// Verification step makes the routine total: wrong candidates map to NONE.
return (EQUAL(MONT_SQUARE(candidate), x) ? X(candidate) : X(NONE_OPT));
}
// Samples a uniformly random affine point on the short-Weierstrass curve
//   y^2 = x^3 + a*x + b
// by rejection: draw random x until the right-hand side is a quadratic
// residue, then pick one of the two roots with a random sign flip.
DEVICE_FUNC void gen_random_elem(affine_point& pt, hiprandState_t& state)
{
//consider equation in short Weierstrass form: y^2 = x^3 + a * x + b
//generate random x and compute right hand side
//if this is not a square - repeat, again and again, until we are successful
uint256_g x;
optional<uint256_g> y_opt;
while (!y_opt)
{
gen_random_elem(x, state);
//compute righthandside
// righthandside = x^3 + a*x + b (Montgomery form)
uint256_g righthandside = MONT_SQUARE(x);
righthandside = MONT_MUL(righthandside, x);
uint256_g temp = MONT_MUL(CURVE_A_COEFF, x);
righthandside = FIELD_ADD(righthandside, temp);
righthandside = FIELD_ADD(righthandside, CURVE_B_COEFF);
y_opt = field_square_root(righthandside);
}
uint256_g y = y_opt.get_val();
// Choose between the two square roots +/-y with probability 1/2 each.
if (hiprand(&state) % 2)
y = FIELD_ADD_INV(y);
pt = affine_point{x, y};
}
// Samples a random curve point in projective form: draws an affine point
// and lifts it with Z = BASE_FIELD_R (Montgomery one).
DEVICE_FUNC void gen_random_elem(ec_point& pt, hiprandState_t& state)
{
affine_point temp;
gen_random_elem(temp, state);
pt = ec_point{temp.x, temp.y, BASE_FIELD_R};
//check if generated point is valid
assert(IS_ON_CURVE(pt));
}
| 787628358590d4c9115d487305f34e5678ab6e55.cu | #include "cuda_structs.h"
//Arithmetic in projective coordinates (Jacobian coordinates should be faster and we are going to check it!)
//TODO: we may also use BN specific optimizations (for example use, that a = 0)
//------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
DEVICE_FUNC ec_point ECC_DOUBLE_PROJ(const ec_point& pt)
{
if (is_zero(pt.y) || is_infinity(pt))
return point_at_infty();
else
{
uint256_g temp, temp2;
uint256_g W, S, B, H, S2;
ec_point res;
#ifdef BN256_SPECIFIC_OPTIMIZATION
temp = MONT_SQUARE(pt.x);
W = MONT_MUL(temp, R3_g);
#else
temp = MONT_SQUARE(pt.x);
temp = MONT_MUL(temp, BASE_FIELD_R3);
temp2 = MONT_SQUARE(pt.z);
temp2 = MONT_MUL(temp2, CURVE_A_COEFF);
W = FIELD_ADD(temp, temp2);
#endif
S = MONT_MUL(pt.y, pt.z);
temp = MONT_MUL(pt.x, pt.y);
B = MONT_MUL(temp, S);
res.x = W;
temp = MONT_SQUARE(W);
temp2 = MONT_MUL(BASE_FIELD_R8, B);
H = FIELD_SUB(temp, temp2);
temp = MONT_MUL(BASE_FIELD_R2, H);
res.x = MONT_MUL(temp, S);
//NB: here result is also equal to one of the operands and hence may be reused!!!
//NB: this is in fact another possibility for optimization!
S2 = MONT_SQUARE(S);
temp = MONT_MUL(BASE_FIELD_R4, B);
temp = FIELD_SUB(temp, H);
temp = MONT_MUL(W, temp);
temp2 = MONT_SQUARE(pt.y);
temp2 = MONT_MUL(BASE_FIELD_R8, temp2);
temp2 = MONT_MUL(temp2, S2);
res.y = FIELD_SUB(temp, temp2);
temp = MONT_MUL(BASE_FIELD_R8, S);
res.z = MONT_MUL(temp, S2);
return res;
}
}
//for debug purposes only: check if point is indeed on curve
DEVICE_FUNC bool IS_ON_CURVE_PROJ(const ec_point& pt)
{
//y^{2} * z = x^{3} + A *x * z^{2} + B * z^{3}
uint256_g temp1, temp2, z2;
z2 = MONT_SQUARE(pt.z);
temp1 = MONT_SQUARE(pt.x);
temp1 = MONT_MUL(temp1, pt.x);
temp2 = MONT_MUL(CURVE_A_COEFF, pt.x);
temp2 = MONT_MUL(temp2, z2);
temp1 = FIELD_ADD(temp1, temp2);
temp2 = MONT_MUL(CURVE_B_COEFF, pt.z);
temp2 = MONT_MUL(temp2, z2);
temp1 = FIELD_ADD(temp1, temp2);
temp2 = MONT_SQUARE(pt.y);
temp2 = MONT_MUL(temp2, pt.z);
return EQUAL(temp1, temp2);
}
DEVICE_FUNC bool EQUAL_PROJ(const ec_point& pt1, const ec_point& pt2)
{
//check all of the following equations:
//X_1 * Y_2 = Y_1 * X_2;
//X_1 * Z_2 = X_2 * Y_1;
//Y_1 * Z_2 = Z_1 * Y_2;
uint256_g temp1, temp2;
temp1 = MONT_MUL(pt1.x, pt2.y);
temp2 = MONT_MUL(pt1.y, pt2.x);
bool first_check = EQUAL(temp1, temp2);
temp1 = MONT_MUL(pt1.y, pt2.z);
temp2 = MONT_MUL(pt1.z, pt2.y);
bool second_check = EQUAL(temp1, temp2);
temp1 = MONT_MUL(pt1.x, pt2.z);
temp2 = MONT_MUL(pt1.z, pt2.x);
bool third_check = EQUAL(temp1, temp2);
return (first_check && second_check && third_check);
}
DEVICE_FUNC ec_point ECC_ADD_PROJ(const ec_point& left, const ec_point& right)
{
if (is_infinity(left))
return right;
if (is_infinity(right))
return left;
uint256_g U1, U2, V1, V2;
U1 = MONT_MUL(left.z, right.y);
U2 = MONT_MUL(left.y, right.z);
V1 = MONT_MUL(left.z, right.x);
V2 = MONT_MUL(left.x, right.z);
ec_point res;
if (EQUAL(V1, V2))
{
if (!EQUAL(U1, U2))
return point_at_infty();
else
return ECC_DOUBLE_PROJ(left);
}
uint256_g U = FIELD_SUB(U1, U2);
uint256_g V = FIELD_SUB(V1, V2);
uint256_g W = MONT_MUL(left.z, right.z);
uint256_g Vsq = MONT_SQUARE(V);
uint256_g Vcube = MONT_MUL(Vsq, V);
uint256_g temp1, temp2;
temp1 = MONT_SQUARE(U);
temp1 = MONT_MUL(temp1, W);
temp1 = FIELD_SUB(temp1, Vcube);
temp2 = MONT_MUL(BASE_FIELD_R2, Vsq);
temp2 = MONT_MUL(temp2, V2);
uint256_g A = FIELD_SUB(temp1, temp2);
res.x = MONT_MUL(V, A);
temp1 = MONT_MUL(Vsq, V2);
temp1 = FIELD_SUB(temp1, A);
temp1 = MONT_MUL(U, temp1);
temp2 = MONT_MUL(Vcube, U2);
res.y = FIELD_SUB(temp1, temp2);
res.z = MONT_MUL(Vcube, W);
return res;
}
DEVICE_FUNC ec_point ECC_SUB_PROJ(const ec_point& left, const ec_point& right)
{
return ECC_ADD_PROJ(left, INV(right));
}
DEVICE_FUNC ec_point ECC_ADD_MIXED_PROJ(const ec_point& left, const affine_point& right)
{
if (is_infinity(left))
return ec_point{right.x, right.y, BASE_FIELD_R};
uint256_g U1, V1;
U1 = MONT_MUL(left.z, right.y);
V1 = MONT_MUL(left.z, right.x);
ec_point res;
if (EQUAL(V1, left.x))
{
if (!EQUAL(U1, left.y))
return point_at_infty();
else
return ECC_DOUBLE_PROJ(left);
}
uint256_g U = FIELD_SUB(U1, left.y);
uint256_g V = FIELD_SUB(V1, left.x);
uint256_g Vsq = MONT_SQUARE(V);
uint256_g Vcube = MONT_MUL(Vsq, V);
uint256_g temp1, temp2;
temp1 = MONT_SQUARE(U);
temp1 = MONT_MUL(temp1, left.z);
temp1 = FIELD_SUB(temp1, Vcube);
temp2 = MONT_MUL(BASE_FIELD_R2, Vsq);
temp2 = MONT_MUL(temp2, left.x);
uint256_g A = FIELD_SUB(temp1, temp2);
res.x = MONT_MUL(V, A);
temp1 = MONT_MUL(Vsq, left.x);
temp1 = FIELD_SUB(temp1, A);
temp1 = MONT_MUL(U, temp1);
temp2 = MONT_MUL(Vcube, left.y);
res.y = FIELD_SUB(temp1, temp2);
res.z = MONT_MUL(Vcube, left.z);
return res;
}
// Arithmetic in Jacobian coordinates (Jacobian coordinates should be faster and we are going to check it!)
// TODO: we may also use BN specific optimizations (for example use, that a = 0)
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------------------------------------------------------------
//TODO: An alternative repeated doubling routine with costs (4m)M + (4m+2)S for any value a can be derived from the Modified Jacobian doubling routine.
// For small values a (say 0 or -3) the costs reduce to (4m-1)M + (4m+2)S, competing nicely with the algorithm showed above.
DEVICE_FUNC ec_point ECC_DOUBLE_JAC(const ec_point& pt)
{
if (is_zero(pt.y) || is_infinity(pt))
return point_at_infty();
else
{
uint256_g temp1, temp2;
temp1 = MONT_MUL(BASE_FIELD_R4, pt.x);
uint256_g Ysq = MONT_SQUARE(pt.y);
uint256_g S = MONT_MUL(temp1, Ysq);
//TODO: here we may also use BN-SPECIFIC optimizations, cause A = 0
temp1 = MONT_SQUARE(pt.x);
temp1 = MONT_MUL(BASE_FIELD_R3, temp1);
temp2 = MONT_SQUARE(pt.z);
temp2 = MONT_SQUARE(temp2);
temp2 = MONT_MUL(temp2, CURVE_A_COEFF);
uint256_g M = FIELD_ADD(temp1, temp2);
temp1 = MONT_SQUARE(M);
temp2 = MONT_MUL(BASE_FIELD_R2, S);
uint256_g res_x = FIELD_SUB(temp1, temp2);
temp1 = FIELD_SUB(S, res_x);
temp1 = MONT_MUL(M, temp1);
temp2 = MONT_SQUARE(Ysq);
temp2 = MONT_MUL(BASE_FIELD_R8, temp2);
uint256_g res_y = FIELD_SUB(temp1, temp2);
temp1 = MONT_MUL(BASE_FIELD_R2, pt.y);
uint256_g res_z = MONT_MUL(temp1, pt.z);
return ec_point{res_x, res_y, res_z};
}
}
DEVICE_FUNC bool IS_ON_CURVE_JAC(const ec_point& pt)
{
//y^4 = x^3 + a x z^4 +b z^6
uint256_g temp1 = MONT_SQUARE(pt.y);
uint256_g lefthandside = MONT_SQUARE(temp1);
uint256_g Zsq = MONT_SQUARE(pt.z);
uint256_g Z4 = MONT_SQUARE(Zsq);
temp1 = MONT_SQUARE(pt.x);
uint256_g righthandside = MONT_MUL(temp1, pt.x);
temp1 = MONT_MUL(CURVE_A_COEFF, pt.x);
temp1 = MONT_MUL(temp1, Z4);
righthandside = FIELD_ADD(righthandside, temp1);
temp1 = MONT_MUL(CURVE_B_COEFF, Zsq);
temp1 = MONT_MUL(temp1, Z4);
righthandside = FIELD_ADD(righthandside, temp1);
return EQUAL(lefthandside, righthandside);
}
// Jacobian-coordinate equality: (X1, Y1, Z1) == (X2, Y2, Z2) iff
//   X1 * Z2^2 == X2 * Z1^2   and   Y1 * Z2^3 == Y2 * Z1^3.
// Fix vs. original: the Y comparison multiplied the second operand by pt2.z,
// effectively comparing Y1*Z2^3 against Y2*Z1^2*Z2; it must be scaled by
// pt1.z to form Y2*Z1^3.
DEVICE_FUNC bool EQUAL_JAC(const ec_point& pt1, const ec_point& pt2)
{
    // Exactly one point at infinity -> unequal; both at infinity -> equal.
    if (is_infinity(pt1) ^ is_infinity(pt2))
        return false;
    if (is_infinity(pt1) & is_infinity(pt2))
        return true;
    // Now both points are affine-representable (Z != 0).
    uint256_g Z1sq = MONT_SQUARE(pt1.z);
    uint256_g Z2sq = MONT_SQUARE(pt2.z);
    // X1 * Z2^2 vs X2 * Z1^2
    uint256_g temp1 = MONT_MUL(pt1.x, Z2sq);
    uint256_g temp2 = MONT_MUL(pt2.x, Z1sq);
    bool first_check = EQUAL(temp1, temp2);
    // Y1 * Z2^3 vs Y2 * Z1^3
    temp1 = MONT_MUL(pt1.y, Z2sq);
    temp1 = MONT_MUL(temp1, pt2.z);
    temp2 = MONT_MUL(pt2.y, Z1sq);
    temp2 = MONT_MUL(temp2, pt1.z);  // was pt2.z (bug)
    bool second_check = EQUAL(temp1, temp2);
    return (first_check && second_check);
}
DEVICE_FUNC ec_point ECC_ADD_JAC(const ec_point& left, const ec_point& right)
{
if (is_infinity(left))
return right;
if (is_infinity(right))
return left;
uint256_g U1, U2;
uint256_g Z2sq = MONT_SQUARE(right.z);
U1 = MONT_MUL(left.x, Z2sq);
uint256_g Z1sq = MONT_SQUARE(left.z);
U2 = MONT_MUL(right.x, Z1sq);
uint256_g S1 = MONT_MUL(left.y, Z2sq);
S1 = MONT_MUL(S1, right.z);
uint256_g S2 = MONT_MUL(right.y, Z1sq);
S2 = MONT_MUL(S2, left.z);
if (EQUAL(U1, U2))
{
if (!EQUAL(S1, S2))
return point_at_infty();
else
return ECC_DOUBLE_JAC(left);
}
uint256_g H = FIELD_SUB(U2, U1);
uint256_g R = FIELD_SUB(S2, S1);
uint256_g Hsq = MONT_SQUARE(H);
uint256_g Hcube = MONT_MUL(Hsq, H);
uint256_g T = MONT_MUL(U1, Hsq);
uint256_g res_x = MONT_SQUARE(R);
res_x = FIELD_SUB(res_x, Hcube);
uint256_g temp = MONT_MUL(BASE_FIELD_R2, T);
res_x = FIELD_SUB(res_x, temp);
uint256_g res_y = FIELD_SUB(T, res_x);
res_y = MONT_MUL(R, res_y);
temp = MONT_MUL(S1, Hcube);
res_y = FIELD_SUB(res_y, temp);
uint256_g res_z = MONT_MUL(H, left.z);
res_z = MONT_MUL(res_z, right.z);
return ec_point{res_x, res_y, res_z};
}
DEVICE_FUNC ec_point ECC_SUB_JAC(const ec_point& left, const ec_point& right)
{
return ECC_ADD_JAC(left, INV(right));
}
DEVICE_FUNC ec_point ECC_ADD_MIXED_JAC(const ec_point& left, const affine_point& right)
{
if (is_infinity(left))
return ec_point{right.x, right.y, BASE_FIELD_R};
uint256_g U2;
uint256_g Z1sq = MONT_SQUARE(left.z);
U2 = MONT_MUL(right.x, Z1sq);
uint256_g S2 = MONT_MUL(right.y, Z1sq);
S2 = MONT_MUL(S2, left.z);
if (EQUAL(left.x, U2))
{
if (!EQUAL(left.y, S2))
return point_at_infty();
else
return ECC_DOUBLE_JAC(left);
}
uint256_g H = FIELD_SUB(U2, left.x);
uint256_g R = FIELD_SUB(S2, left.y);
uint256_g Hsq = MONT_SQUARE(H);
uint256_g Hcube = MONT_MUL(Hsq, H);
uint256_g T = MONT_MUL(left.x, Hsq);
uint256_g res_x = MONT_SQUARE(R);
res_x = FIELD_SUB(res_x, Hcube);
uint256_g temp = MONT_MUL(BASE_FIELD_R2, T);
res_x = FIELD_SUB(res_x, temp);
uint256_g res_y = FIELD_SUB(T, res_x);
res_y = MONT_MUL(R, res_y);
temp = MONT_MUL(left.y, Hcube);
res_y = FIELD_SUB(res_y, temp);
uint256_g res_z = MONT_MUL(H, left.z);
return ec_point{res_x, res_y, res_z};
}
//TODO: what about repeated doubling (m-fold doubling) for Jacobian coordinates?
//random number generators
//---------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------------------------------
static DEVICE_FUNC inline uint256_g field_exp(const uint256_g& elem, const uint256_g& power)
{
uint256_g S = elem;
uint256_g Q = BASE_FIELD_R;
for (size_t i = 0; i < N_BITLEN; i++)
{
bool flag = get_bit(power, i);
if (flag)
{
Q = MONT_MUL(Q, S);
}
S = MONT_SQUARE(S);
}
return Q;
}
//The following algorithm is taken from 1st edition of
//Jeffrey Hoffstein, Jill Pipher, J.H. Silverman - An introduction to mathematical cryptography
//Proposition 2.27 on page 84
static DEVICE_FUNC inline optional<uint256_g> field_square_root(const uint256_g& x)
{
uint256_g candidate = field_exp(x, MAGIC_CONSTANT);
using X = optional<uint256_g>;
return (EQUAL(MONT_SQUARE(candidate), x) ? X(candidate) : X(NONE_OPT));
}
DEVICE_FUNC void gen_random_elem(affine_point& pt, curandState& state)
{
//consider equation in short Weierstrass form: y^2 = x^3 + a * x + b
//generate random x and compute right hand side
//if this is not a square - repeat, again and again, until we are successful
uint256_g x;
optional<uint256_g> y_opt;
while (!y_opt)
{
gen_random_elem(x, state);
//compute righthandside
uint256_g righthandside = MONT_SQUARE(x);
righthandside = MONT_MUL(righthandside, x);
uint256_g temp = MONT_MUL(CURVE_A_COEFF, x);
righthandside = FIELD_ADD(righthandside, temp);
righthandside = FIELD_ADD(righthandside, CURVE_B_COEFF);
y_opt = field_square_root(righthandside);
}
uint256_g y = y_opt.get_val();
if (curand(&state) % 2)
y = FIELD_ADD_INV(y);
pt = affine_point{x, y};
}
DEVICE_FUNC void gen_random_elem(ec_point& pt, curandState& state)
{
affine_point temp;
gen_random_elem(temp, state);
pt = ec_point{temp.x, temp.y, BASE_FIELD_R};
//check if generated point is valid
assert(IS_ON_CURVE(pt));
}
|
7a91bf255079fd652059ae59a3f1d0bb78ac2a56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include "params.h"
// Maps a 2D in-block thread position (t_x, t_y) to a flat global index:
//   blockIdx.x * blockDim.x * blockDim.y + t_y * blockDim.x + t_x.
// NOTE(review): assumes a 1D grid of 2D blocks — confirm launch config.
__device__ int getIndex(int t_x, int t_y)
{
// calculate full index from a grid position
int indx = __mul24(t_y,blockDim.x) + t_x;
return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + indx;
}
// Overload for an already-flattened in-block index t_x: adds this block's
// offset (blockDim.x * blockDim.y * blockIdx.x) to produce the global index.
__device__ int getIndex(int t_x)
{
// calculate full index from a grid position
return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + t_x;
}
// Per-thread RNG initialization: one hiprandState_t per global thread id.
__global__ void d_initRands(hiprandState_t *state, int seed)
{
int id = getIndex(threadIdx.x, threadIdx.y);
/* Each thread gets same seed, a different sequence
* number, no offset */
hiprand_init(seed, id, 0, &state[id]);
}
// One Monte-Carlo update step. Each block is an independent N_x*N_x system and
// each thread owns one lattice site (myInd). The thread reads its neighbor
// count nn = net[N*myInd] and neighbor ids net[N*myInd+1 .. N*myInd+nn],
// counts "up" neighbors into deltan, forms the local field localFrac, draws a
// Gaussian-noisy threshold, and computes its prospective new state. After a
// block-wide barrier, ONLY the thread whose flat index equals t commits its
// state — i.e. a randomly-chosen single-site (sequential) update per call.
// NOTE(review): neigh, perm/permList, bl, debug, NL and sCount are unused or
// feed only commented-out diagnostics; ws presumably comes from params.h.
__global__ void d_updateStates(int* states, int* net, float* wg, int N_x, hiprandState_t* d_rands, int NL, int t)
{
int id = getIndex(threadIdx.x, threadIdx.y);
int edges=80;
int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } };
int deltan = 0;
int bl = blockIdx.x;
int N = N_x*N_x;
int myInd = threadIdx.y*N_x + threadIdx.x;
//generate random permutation array (disabled)
int permList[8] = {0,1,2,3,4,5,6,7};
int perm[8] ;//= {0,1,2,3,4,5,6,7};
/*
for (int e=0;e<edges;e++)
{
int n = hiprand_uniform(&d_rands[id])*8;
if (n==8) n==7;
bool up = (hiprand_uniform(&d_rands[id])>0.5);
while (permList[n]<0)
{
if (up) n++;
else n--;
if (n<0)
n=7;
if (n>7)
n=0;
}
perm[e]=permList[n];
permList[n]=-1;
}
// */
// First entry of this site's row in `net` is the neighbor count.
int nn = net[N*myInd];
for (int e=0;e<nn;e++)
{
int distance = e;
int n2 = net[N*myInd+e+1];// (((myInd + distance) % N) + N) % N;
int n2_id = getIndex(n2);
if (states[n2_id]>0.5)
deltan++;
}
// if (t>1)
// if (states[id]<0.5)
// printf("%d %d %0.3f %d \n", t, myInd, deltan/80.0, states[id]);
bool debug = true;
// if ((debug))
// Diagnostic total of "up" sites in this block (only used in the
// commented-out printf below).
int sCount = 0;
for (int x_n=0;x_n<N_x;x_n++)
for (int y_n=0;y_n<N_x;y_n++)
{
int n2_id = getIndex(x_n, y_n);
if (states[n2_id]>0.5)
sCount++;
}
// if (sCount==32)
// printf("%d %d %d %d \n",t, deltan, sCount, states[id]);
// Rescale the up-neighbor fraction to a signed field in [-edges, +edges].
float localFrac = float(edges)*(2.0*float(deltan)/(1.0f*float(nn)) - 1.0);
// deltan is N+ right now but we want (N+ - N-)
// deltan*=2;
// deltan-=edges;
// Gaussian-perturbed threshold; variance shrinks as wg[id] grows.
float cue = 1.0f + ( hiprand_normal(&d_rands[id]) * sqrtf(1.0f/(2.0f*wg[id])) ) ;
float pup = exp(-4.0f*wg[id]*cue);
float pall = pup*powf((1.0f - ws)/ws,localFrac);
int newState;
if (pall<1.0f)
newState = 1;
else
newState = 0;
// if ((states[id]>-0.5)&&(sCount==32))
// printf("%d %d %d %d %d %d %d %f \n",t, myInd, states[id], newState, deltan, nn, sCount, localFrac);
// Barrier before the write so every thread has finished reading `states`.
__syncthreads();
if (myInd==t)
states[id] = newState;
}
// Statistics pass, executed serially by a single thread (thread 0 of block 0)
// over every block's N_x*N_x state array. `states2` holds the snapshot from
// the previous call. On t==0 it only takes the initial snapshot; afterwards,
// for each block it counts ups in the snapshot (totalUp) and in the current
// state (thisUp), increments the visit histogram d_upcount[totalUp], and tallies
// downward/upward transitions into d_down/d_up, then refreshes the snapshot.
// NOTE(review): d_rands, d_downcount, nowDown and nowUp are currently unused
// outside commented-out code.
__global__ void d_recordData(int* states, int* states2, hiprandState_t* d_rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
int group_id = threadIdx.y * N_x + threadIdx.x;
int N = N_x*N_x;
if ((group_id==0)&&(blockIdx.x==0))
for (int b=0;b<gridDim.x;b++)
{
if (t==0)
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
else
{
// Up-count in the previous snapshot of block b.
int totalUp = 0;
for (int i=0;i<N;i++)
if (states2[b * N + i] > 0.5)
totalUp++;
// Up-count in the current state of block b.
int thisUp = 0;
for (int i=0;i<N;i++)
if (states[b * N + i] > 0.5)
thisUp++;
int nowDown = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] > 0.5)&&(states[b * N + i] < 0.5))
nowDown++;
int nowUp = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] < 0.5)&&(states[b * N + i] > 0.5))
nowUp++;
d_upcount[totalUp]+=1;
int c = d_upcount[totalUp];
// printf("%d %d %d %d\n",t, totalUp,nowDown, nowUp);
if (thisUp<totalUp)
d_down[totalUp] +=1.0;// (1.0)/(float)c + (c-1)*d_down[totalUp]/(float)c;
if (thisUp>totalUp)
d_up[totalUp] +=1.0;// (1.0)/(float)c + (c-1)*d_up[totalUp]/(float)c;
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
// now for something crazy!!!
// we're going to count all the uppies and then put them all in order
totalUp=0;
for (int i=0;i<N;i++)
{
if (states[b * N + i] > 0.5)
totalUp++;
// states[b * N + i] = 0;
}
// totalUp=32;
/* int nc = 0.875 * totalUp;
float frac = float(totalUp-nc)/float(N-totalUp);
for (int i=0;i<nc;i++)
states[b * N + i] = 1;
for (int i=nc;i<N;i++)
if (hiprand_uniform(&d_rands[group_id])< frac)
states[b * N + i] = 1;
*/
// int i2 = totalUp + 0.5*(N-totalUp);
// states[b * N + i2] = 1;
//
// Refresh the snapshot for the next call.
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
}
//res[t * gridDim.y + blockIdx.y] = counter;
// if (t==0)
// res[blockIdx.y] = counter;
// else
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
}
}
// Classic shared-memory tree reduction: each block sums its slice of `input`
// and writes one partial sum to per_block_results[blockIdx.x]. Requires
// blockDim.x * sizeof(int) bytes of dynamic shared memory, and the halving
// loop assumes blockDim.x is a power of two (odd remainders would be dropped).
__global__ void block_sum(const int *input, int *per_block_results, const size_t n)
{
extern __shared__ int sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory (0 pads past the end of the array)
int x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
// Host wrapper: launch d_initRands with numBlocks blocks of threadGrid threads
// and report (but do not propagate) any launch error.
void initRands(dim3 threadGrid, int numBlocks, hiprandState_t *state, int seed)
{
hipLaunchKernelGGL(( d_initRands), dim3(numBlocks), dim3(threadGrid) , 0, 0, state, seed);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
// Host wrapper for one update step: picks a random site index r in
// [0, N_x*N_x) on the host and passes it as the kernel's `t` (the single site
// allowed to commit its new state). The `t` parameter of this wrapper itself
// is unused.
// NOTE(review): rand() / (RAND_MAX / (N_x*N_x)) can yield exactly N_x*N_x
// when rand() == RAND_MAX, in which case no site updates that step — confirm
// whether that is intended.
void advanceTimestep(dim3 threadGrid, int numBlocks, hiprandState_t *rands, float* wg, int* states, int* net, int N_x, int NL, int t)
{
int r = rand() / ( RAND_MAX / (N_x*N_x) );
hipLaunchKernelGGL(( d_updateStates), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, net, wg, N_x, rands, NL, r);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
// Host wrapper: launch the d_recordData statistics kernel and report (but do
// not propagate) any launch error.
void recordData(dim3 threadGrid, int numBlocks, int* states, int* states2, hiprandState_t *rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
hipLaunchKernelGGL(( d_recordData), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, states2, rands, N_x, d_up, d_down, d_upcount, d_downcount, t);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
// Host wrapper: per-block reduction of `states` into blockTotals. Allocates
// numThreads * sizeof(int) dynamic shared memory as block_sum requires.
void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL)
{
hipLaunchKernelGGL(( block_sum), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(int) , 0, states, blockTotals, N_ALL);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
| 7a91bf255079fd652059ae59a3f1d0bb78ac2a56.cu |
#include <curand_kernel.h>
#include <stdio.h>
#include "params.h"
__device__ int getIndex(int t_x, int t_y)
{
    // Flatten a 2-D thread coordinate (t_x, t_y) into a global linear index:
    // this block's offset in the 1-D grid of 2-D blocks, plus the thread's
    // row-major position inside the block. Uses 24-bit multiplies like the
    // other index helpers in this file.
    int withinBlock = __mul24(t_y, blockDim.x) + t_x;
    int blockOffset = __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x));
    return blockOffset + withinBlock;
}
__device__ int getIndex(int t_x)
{
// calculate full index from a grid position
return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + t_x;
}
__global__ void d_initRands(curandState *state, int seed)
{
int id = getIndex(threadIdx.x, threadIdx.y);
/* Each thread gets same seed, a different sequence
* number, no offset */
curand_init(seed, id, 0, &state[id]);
}
__global__ void d_updateStates(int* states, int* net, float* wg, int N_x, curandState* d_rands, int NL, int t)
{
int id = getIndex(threadIdx.x, threadIdx.y);
int edges=80;
int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } };
int deltan = 0;
int bl = blockIdx.x;
int N = N_x*N_x;
int myInd = threadIdx.y*N_x + threadIdx.x;
//generate random permutation array
int permList[8] = {0,1,2,3,4,5,6,7};
int perm[8] ;//= {0,1,2,3,4,5,6,7};
/*
for (int e=0;e<edges;e++)
{
int n = curand_uniform(&d_rands[id])*8;
if (n==8) n==7;
bool up = (curand_uniform(&d_rands[id])>0.5);
while (permList[n]<0)
{
if (up) n++;
else n--;
if (n<0)
n=7;
if (n>7)
n=0;
}
perm[e]=permList[n];
permList[n]=-1;
}
// */
int nn = net[N*myInd];
for (int e=0;e<nn;e++)
{
int distance = e;
int n2 = net[N*myInd+e+1];// (((myInd + distance) % N) + N) % N;
int n2_id = getIndex(n2);
if (states[n2_id]>0.5)
deltan++;
}
// if (t>1)
// if (states[id]<0.5)
// printf("%d %d %0.3f %d \n", t, myInd, deltan/80.0, states[id]);
bool debug = true;
// if ((debug))
int sCount = 0;
for (int x_n=0;x_n<N_x;x_n++)
for (int y_n=0;y_n<N_x;y_n++)
{
int n2_id = getIndex(x_n, y_n);
if (states[n2_id]>0.5)
sCount++;
}
// if (sCount==32)
// printf("%d %d %d %d \n",t, deltan, sCount, states[id]);
float localFrac = float(edges)*(2.0*float(deltan)/(1.0f*float(nn)) - 1.0);
// deltan is N+ right now but we want (N+ - N-)
// deltan*=2;
// deltan-=edges;
float cue = 1.0f + ( curand_normal(&d_rands[id]) * sqrtf(1.0f/(2.0f*wg[id])) ) ;
float pup = exp(-4.0f*wg[id]*cue);
float pall = pup*powf((1.0f - ws)/ws,localFrac);
int newState;
if (pall<1.0f)
newState = 1;
else
newState = 0;
// if ((states[id]>-0.5)&&(sCount==32))
// printf("%d %d %d %d %d %d %d %f \n",t, myInd, states[id], newState, deltan, nn, sCount, localFrac);
__syncthreads();
if (myInd==t)
states[id] = newState;
}
__global__ void d_recordData(int* states, int* states2, curandState* d_rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
int group_id = threadIdx.y * N_x + threadIdx.x;
int N = N_x*N_x;
if ((group_id==0)&&(blockIdx.x==0))
for (int b=0;b<gridDim.x;b++)
{
if (t==0)
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
else
{
int totalUp = 0;
for (int i=0;i<N;i++)
if (states2[b * N + i] > 0.5)
totalUp++;
int thisUp = 0;
for (int i=0;i<N;i++)
if (states[b * N + i] > 0.5)
thisUp++;
int nowDown = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] > 0.5)&&(states[b * N + i] < 0.5))
nowDown++;
int nowUp = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] < 0.5)&&(states[b * N + i] > 0.5))
nowUp++;
d_upcount[totalUp]+=1;
int c = d_upcount[totalUp];
// printf("%d %d %d %d\n",t, totalUp,nowDown, nowUp);
if (thisUp<totalUp)
d_down[totalUp] +=1.0;// (1.0)/(float)c + (c-1)*d_down[totalUp]/(float)c;
if (thisUp>totalUp)
d_up[totalUp] +=1.0;// (1.0)/(float)c + (c-1)*d_up[totalUp]/(float)c;
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
// now for something crazy!!!
// we're going to count all the uppies and then put them all in order
totalUp=0;
for (int i=0;i<N;i++)
{
if (states[b * N + i] > 0.5)
totalUp++;
// states[b * N + i] = 0;
}
// totalUp=32;
/* int nc = 0.875 * totalUp;
float frac = float(totalUp-nc)/float(N-totalUp);
for (int i=0;i<nc;i++)
states[b * N + i] = 1;
for (int i=nc;i<N;i++)
if (curand_uniform(&d_rands[group_id])< frac)
states[b * N + i] = 1;
*/
// int i2 = totalUp + 0.5*(N-totalUp);
// states[b * N + i2] = 1;
//
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
}
//res[t * gridDim.y + blockIdx.y] = counter;
// if (t==0)
// res[blockIdx.y] = counter;
// else
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
}
}
// Classic shared-memory tree reduction: each block sums its slice of `input`
// and writes one partial sum to per_block_results[blockIdx.x]. Requires
// blockDim.x * sizeof(int) bytes of dynamic shared memory, and the halving
// loop assumes blockDim.x is a power of two (odd remainders would be dropped).
__global__ void block_sum(const int *input, int *per_block_results, const size_t n)
{
extern __shared__ int sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory (0 pads past the end of the array)
int x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
void initRands(dim3 threadGrid, int numBlocks, curandState *state, int seed)
{
d_initRands<<< numBlocks, threadGrid >>>(state, seed);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
void advanceTimestep(dim3 threadGrid, int numBlocks, curandState *rands, float* wg, int* states, int* net, int N_x, int NL, int t)
{
int r = rand() / ( RAND_MAX / (N_x*N_x) );
d_updateStates<<< numBlocks, threadGrid >>>(states, net, wg, N_x, rands, NL, r);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
void recordData(dim3 threadGrid, int numBlocks, int* states, int* states2, curandState *rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
d_recordData<<< numBlocks, threadGrid >>>(states, states2, rands, N_x, d_up, d_down, d_upcount, d_downcount, t);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL)
{
block_sum<<< numBlocks, numThreads, numThreads * sizeof(int) >>>(states, blockTotals, N_ALL);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
|
862e792a5a3f9608f7725ea702b3f0c30e183e86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions mixed zc -> ds
*/
#include "magma_internal.h"
#define NB 64
// TODO check precision, as in zlag2c?
// Forward pivoted down-cast: thread `ind` (one per row, NB per block) copies
// row ipiv[ind] of the double-complex matrix A into row ind of the
// single-complex matrix SA, walking all n columns and truncating each entry
// to float precision.
__global__ void
zclaswp_kernel(
int n,
magmaDoubleComplex *A, int lda,
magmaFloatComplex *SA, int ldsa,
int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaFloatComplex res;
if (ind < m) {
// Advance SA and ipiv to this thread's row; newind selects the source
// row of A.
SA += ind;
ipiv += ind;
newind = ipiv[0];
for (int i=0; i < n; i++) {
res = MAGMA_C_MAKE( (float)MAGMA_Z_REAL( A[newind+i*lda] ),
(float)MAGMA_Z_IMAG( A[newind+i*lda] ));
SA[i*ldsa] = res;
}
}
}
// Inverse pivoted up-cast: thread `ind` copies row ipiv[ind] of the
// single-complex matrix SA into row ind of the double-complex matrix A,
// widening each entry to double precision.
__global__ void
zclaswp_inv_kernel(
int n,
magmaDoubleComplex *A, int lda,
magmaFloatComplex *SA, int ldsa,
int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaDoubleComplex res;
if (ind < m) {
// Advance A and ipiv to this thread's row; newind selects the source
// row of SA.
A += ind;
ipiv += ind;
newind = ipiv[0];
for (int i=0; i < n; i++) {
res = MAGMA_Z_MAKE( (double)MAGMA_C_REAL( SA[newind+i*ldsa] ),
(double)MAGMA_C_IMAG( SA[newind+i*ldsa] ));
A[i*lda] = res;
}
}
}
/**
Purpose
-------
Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or
row i of SA is cast to double precision in row ipiv[i] of A (incx < 0),
for 0 <= i < M.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix A.
@param[in,out]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
On entry, the M-by-N matrix to which the row interchanges will be applied.
TODO update docs
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in,out]
SA REAL array on the GPU, dimension (LDSA,N)
On exit, the single precision, permuted matrix.
TODO update docs
@param[in]
ldsa INTEGER.
LDSA specifies the leading dimension of SA.
@param[in]
m The number of rows to be interchanged.
@param[in]
ipiv INTEGER array on the GPU, dimension (M)
The vector of pivot indices. Row i of A is cast to single
precision in row ipiv[i] of SA, for 0 <= i < m.
@param[in]
incx INTEGER
If INCX is negative, the pivots are applied in reverse order,
otherwise in straight-forward order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
// Host entry point (see the documentation block above): launches one thread
// per row, NB rows per block, on the queue's stream. incx >= 0 selects the
// forward copy (A -> SA); a negative incx selects the inverse copy (SA -> A).
extern "C" void
magmablas_zclaswp_q(
magma_int_t n,
magmaDoubleComplex_ptr A, magma_int_t lda,
magmaFloatComplex_ptr SA, magma_int_t ldsa,
magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx,
magma_queue_t queue )
{
int blocks = magma_ceildiv( m, NB );
dim3 grid( blocks );
dim3 threads( NB );
if (incx >= 0)
hipLaunchKernelGGL(( zclaswp_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, m, ipiv);
else
hipLaunchKernelGGL(( zclaswp_inv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, m, ipiv);
}
| 862e792a5a3f9608f7725ea702b3f0c30e183e86.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions mixed zc -> ds
*/
#include "magma_internal.h"
#define NB 64
// TODO check precision, as in zlag2c?
__global__ void
zclaswp_kernel(
int n,
magmaDoubleComplex *A, int lda,
magmaFloatComplex *SA, int ldsa,
int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaFloatComplex res;
if (ind < m) {
SA += ind;
ipiv += ind;
newind = ipiv[0];
for (int i=0; i < n; i++) {
res = MAGMA_C_MAKE( (float)MAGMA_Z_REAL( A[newind+i*lda] ),
(float)MAGMA_Z_IMAG( A[newind+i*lda] ));
SA[i*ldsa] = res;
}
}
}
__global__ void
zclaswp_inv_kernel(
int n,
magmaDoubleComplex *A, int lda,
magmaFloatComplex *SA, int ldsa,
int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaDoubleComplex res;
if (ind < m) {
A += ind;
ipiv += ind;
newind = ipiv[0];
for (int i=0; i < n; i++) {
res = MAGMA_Z_MAKE( (double)MAGMA_C_REAL( SA[newind+i*ldsa] ),
(double)MAGMA_C_IMAG( SA[newind+i*ldsa] ));
A[i*lda] = res;
}
}
}
/**
Purpose
-------
Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or
row i of SA is cast to double precision in row ipiv[i] of A (incx < 0),
for 0 <= i < M.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix A.
@param[in,out]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
On entry, the M-by-N matrix to which the row interchanges will be applied.
TODO update docs
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in,out]
SA REAL array on the GPU, dimension (LDSA,N)
On exit, the single precision, permuted matrix.
TODO update docs
@param[in]
ldsa INTEGER.
LDSA specifies the leading dimension of SA.
@param[in]
m The number of rows to be interchanged.
@param[in]
ipiv INTEGER array on the GPU, dimension (M)
The vector of pivot indices. Row i of A is cast to single
precision in row ipiv[i] of SA, for 0 <= i < m.
@param[in]
incx INTEGER
If INCX is negative, the pivots are applied in reverse order,
otherwise in straight-forward order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zclaswp_q(
magma_int_t n,
magmaDoubleComplex_ptr A, magma_int_t lda,
magmaFloatComplex_ptr SA, magma_int_t ldsa,
magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx,
magma_queue_t queue )
{
int blocks = magma_ceildiv( m, NB );
dim3 grid( blocks );
dim3 threads( NB );
if (incx >= 0)
zclaswp_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, A, lda, SA, ldsa, m, ipiv);
else
zclaswp_inv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, A, lda, SA, ldsa, m, ipiv);
}
|
ead660722031918a745cd71d04b352bb2c23705b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/fast_gelu_impl.cuh"
#include "include/hip/hip_fp16.h"
// Generic grid-stride elementwise fast-GELU. Note the arithmetic is done in
// float regardless of T (each element is converted to float on load).
template <typename T>
__global__ void FastGeluKernel(size_t size, T *input_addr, T *output_addr) {
// formula:
// fast_gelu(x) = x / (1 + exp(-1.702 * abs(x))) * (exp(0.851 * (x - abs(x))))
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
float x = input_addr[pos];
float up = ::exp(0.851 * (x - std::abs(x)));
float down = 1 + ::exp(-1.702 * std::abs(x));
output_addr[pos] = x / down * up;
}
}
// Half-precision specialization: same formula evaluated with half-precision
// hexp; abs is taken via a float round-trip since half lacks a std::abs.
template <>
__global__ void FastGeluKernel(size_t size, half *input_addr, half *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
half x = input_addr[pos];
half up = hexp(half(0.851) * (x - half(std::abs(__half2float(x)))));
half down = half(1) + hexp(half(-1.702) * half(std::abs(__half2float(x))));
output_addr[pos] = x / down * up;
}
}
// Vectorized half2 specialization: processes two half values per element,
// computing abs componentwise through a float2 round-trip.
template <>
__global__ void FastGeluKernel(size_t size, half2 *input_addr, half2 *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
half2 x = input_addr[pos];
float2 float2_x = __half22float2(x);
float2 abs_x_res;
abs_x_res.x = std::abs(float2_x.x);
abs_x_res.y = std::abs(float2_x.y);
half2 half2_x_abs = __float22half2_rn(abs_x_res);
half2 up = h2exp(half2(0.851, 0.851) * (x - half2_x_abs));
half2 down = half2(1, 1) + h2exp(half2(-1.702, -1.702) * half2_x_abs);
output_addr[pos] = x / down * up;
}
}
// Host launcher for the generic fast-GELU kernel on the given stream.
template <typename T>
void FastGelu(size_t size, T *input_addr, T *output_addr, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( FastGeluKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input_addr, output_addr);
}
// Half launcher: even element counts take the vectorized half2 path (size/2
// pairs; the kernel's grid-stride loop keeps this correct even though the
// block count is sized from `size`), odd counts fall back to scalar half.
template <>
void FastGelu(size_t size, half *input_addr, half *output_addr, hipStream_t cuda_stream) {
if (size % 2 == 0) {
hipLaunchKernelGGL(( FastGeluKernel<half2>), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
size / 2, reinterpret_cast<half2 *>(input_addr), reinterpret_cast<half2 *>(output_addr));
} else {
hipLaunchKernelGGL(( FastGeluKernel<half>), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input_addr, output_addr);
}
}
// Generic grid-stride fast-GELU backward pass: dx = dy * y'(x).
// NOTE(review): the code's final "+ 1" in div_up equals the commented
// exp(1.702 * (x - abs(x))) term only for x >= 0; for negative x the two
// differ — confirm which form is intended.
template <typename T>
__global__ void FastGeluGradKernel(size_t size, T *dy_addr, T *x_addr, T *dx_addr) {
// formula:
// dx = dy * y'
// y' = div_up / div_down
// div_up = exp(-1.702 * x) + 1.702 * x * exp(-1.702 * x) + exp(1.702 * (x - abs(x)))
// div_down = (exp(-1.702 * x) + 1)^2
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
T x = x_addr[pos];
T exp_res = ::exp(-1.702 * x);
T div_up = exp_res + static_cast<T>(1.702) * x * exp_res + static_cast<T>(1);
T div_down = (exp_res + static_cast<T>(1)) * (exp_res + static_cast<T>(1));
T y_res = div_up / div_down;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
// Vectorized half2 backward pass: same derivative formula, two halves at once.
template <typename T>
__global__ void FastGeluGradKernel(size_t size, half2 *dy_addr, half2 *x_addr, half2 *dx_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
half2 x = x_addr[pos];
half2 exp_res = h2exp(half2(-1.702, -1.702) * x);
half2 div_up = exp_res + half2(1.702, 1.702) * x * exp_res + half2(1, 1);
half2 div_down = (exp_res + half2(1, 1)) * (exp_res + half2(1, 1));
half2 y_res = div_up / div_down;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
// Scalar half backward pass: same derivative formula in half precision.
template <typename T>
__global__ void FastGeluGradKernel(size_t size, half *dy_addr, half *x_addr, half *dx_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
half x = x_addr[pos];
half exp_res = hexp(half(-1.702) * x);
half div_up = exp_res + half(1.702) * x * exp_res + half(1);
half div_down = (exp_res + half(1)) * (exp_res + half(1));
half y_res = div_up / div_down;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
// Host launcher for the generic backward kernel on the given stream.
template <typename T>
void FastGeluGradKernel(size_t size, T *dy_addr, T *x_addr, T *dx_addr, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( FastGeluGradKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, dy_addr, x_addr, dx_addr);
}
// Half launcher for the backward pass: even element counts use the half2
// path on size/2 pairs, odd counts fall back to the scalar half kernel.
template <>
void FastGeluGradKernel(size_t size, half *dy_addr, half *x_addr, half *dx_addr, hipStream_t cuda_stream) {
if (size % 2 == 0) {
hipLaunchKernelGGL(( FastGeluGradKernel<half2>), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
size / 2, reinterpret_cast<half2 *>(dy_addr), reinterpret_cast<half2 *>(x_addr),
reinterpret_cast<half2 *>(dx_addr));
} else {
hipLaunchKernelGGL(( FastGeluGradKernel<half>), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, dy_addr, x_addr, dx_addr);
}
}
template CUDA_LIB_EXPORT void FastGelu(size_t size, float *input_addr, float *output_addr, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FastGelu(size_t size, half *input_addr, half *output_addr, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FastGeluGradKernel(size_t size, float *dy_addr, float *x_addr, float *dx_addr,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FastGeluGradKernel(size_t size, half *dy_addr, half *x_addr, half *dx_addr,
hipStream_t cuda_stream);
| ead660722031918a745cd71d04b352bb2c23705b.cu | /**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/fast_gelu_impl.cuh"
#include "include/cuda_fp16.h"
template <typename T>
__global__ void FastGeluKernel(size_t size, T *input_addr, T *output_addr) {
// formula:
// fast_gelu(x) = x / (1 + exp(-1.702 * abs(x))) * (exp(0.851 * (x - abs(x))))
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
float x = input_addr[pos];
float up = std::exp(0.851 * (x - std::abs(x)));
float down = 1 + std::exp(-1.702 * std::abs(x));
output_addr[pos] = x / down * up;
}
}
template <>
__global__ void FastGeluKernel(size_t size, half *input_addr, half *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
half x = input_addr[pos];
half up = hexp(half(0.851) * (x - half(std::abs(__half2float(x)))));
half down = half(1) + hexp(half(-1.702) * half(std::abs(__half2float(x))));
output_addr[pos] = x / down * up;
}
}
template <>
__global__ void FastGeluKernel(size_t size, half2 *input_addr, half2 *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
half2 x = input_addr[pos];
float2 float2_x = __half22float2(x);
float2 abs_x_res;
abs_x_res.x = std::abs(float2_x.x);
abs_x_res.y = std::abs(float2_x.y);
half2 half2_x_abs = __float22half2_rn(abs_x_res);
half2 up = h2exp(half2(0.851, 0.851) * (x - half2_x_abs));
half2 down = half2(1, 1) + h2exp(half2(-1.702, -1.702) * half2_x_abs);
output_addr[pos] = x / down * up;
}
}
template <typename T>
void FastGelu(size_t size, T *input_addr, T *output_addr, cudaStream_t cuda_stream) {
FastGeluKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input_addr, output_addr);
}
template <>
void FastGelu(size_t size, half *input_addr, half *output_addr, cudaStream_t cuda_stream) {
if (size % 2 == 0) {
FastGeluKernel<half2><<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(
size / 2, reinterpret_cast<half2 *>(input_addr), reinterpret_cast<half2 *>(output_addr));
} else {
FastGeluKernel<half><<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input_addr, output_addr);
}
}
template <typename T>
__global__ void FastGeluGradKernel(size_t size, T *dy_addr, T *x_addr, T *dx_addr) {
// formula:
// dx = dy * y'
// y' = div_up / div_down
// div_up = exp(-1.702 * x) + 1.702 * x * exp(-1.702 * x) + exp(1.702 * (x - abs(x)))
// div_down = (exp(-1.702 * x) + 1)^2
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
T x = x_addr[pos];
T exp_res = std::exp(-1.702 * x);
T div_up = exp_res + static_cast<T>(1.702) * x * exp_res + static_cast<T>(1);
T div_down = (exp_res + static_cast<T>(1)) * (exp_res + static_cast<T>(1));
T y_res = div_up / div_down;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
template <typename T>
__global__ void FastGeluGradKernel(size_t size, half2 *dy_addr, half2 *x_addr, half2 *dx_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
half2 x = x_addr[pos];
half2 exp_res = h2exp(half2(-1.702, -1.702) * x);
half2 div_up = exp_res + half2(1.702, 1.702) * x * exp_res + half2(1, 1);
half2 div_down = (exp_res + half2(1, 1)) * (exp_res + half2(1, 1));
half2 y_res = div_up / div_down;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
template <typename T>
__global__ void FastGeluGradKernel(size_t size, half *dy_addr, half *x_addr, half *dx_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
half x = x_addr[pos];
half exp_res = hexp(half(-1.702) * x);
half div_up = exp_res + half(1.702) * x * exp_res + half(1);
half div_down = (exp_res + half(1)) * (exp_res + half(1));
half y_res = div_up / div_down;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
template <typename T>
void FastGeluGradKernel(size_t size, T *dy_addr, T *x_addr, T *dx_addr, cudaStream_t cuda_stream) {
FastGeluGradKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, dy_addr, x_addr, dx_addr);
}
template <>
void FastGeluGradKernel(size_t size, half *dy_addr, half *x_addr, half *dx_addr, cudaStream_t cuda_stream) {
if (size % 2 == 0) {
FastGeluGradKernel<half2><<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(
size / 2, reinterpret_cast<half2 *>(dy_addr), reinterpret_cast<half2 *>(x_addr),
reinterpret_cast<half2 *>(dx_addr));
} else {
FastGeluGradKernel<half><<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, dy_addr, x_addr, dx_addr);
}
}
template CUDA_LIB_EXPORT void FastGelu(size_t size, float *input_addr, float *output_addr, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FastGelu(size_t size, half *input_addr, half *output_addr, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FastGeluGradKernel(size_t size, float *dy_addr, float *x_addr, float *dx_addr,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FastGeluGradKernel(size_t size, half *dy_addr, half *x_addr, half *dx_addr,
cudaStream_t cuda_stream);
|
c9e774f17813bcf6a0a76aec5a20c0e17fae4363.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//no performance difference if using float Mono input instead of float4 RGBA
//texture<float, hipTextureType2D, hipReadModeElementType> inTex;
//g_odata[offset] = tex2D(inTex, xc, yc);
texture<float4, hipTextureType2D, hipReadModeElementType> inTex;
surface<void, cudaSurfaceType2D> outputSurface;
// One thread per pixel of an imgw x imgh image: samples the bound float4
// texture `inTex` at the texel center (x + 0.5, y + 0.5), stores the first
// channel into g_odata in row-major order, and writes the flat pixel index
// into `keys` (presumably for a later key/value sort — see the sort comment
// at the top of this file).
__global__ void arrayToData(float *g_odata, uint* keys, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float xc = x + 0.5;
float yc = y + 0.5;
g_odata[offset] = tex2D(inTex, xc, yc).x;
keys[offset] = offset;
}
} | c9e774f17813bcf6a0a76aec5a20c0e17fae4363.cu | #include "includes.h"
//no performance difference if using float Mono input instead of float4 RGBA
//texture<float, cudaTextureType2D, cudaReadModeElementType> inTex;
//g_odata[offset] = tex2D(inTex, xc, yc);
texture<float4, cudaTextureType2D, cudaReadModeElementType> inTex;
surface<void, cudaSurfaceType2D> outputSurface;
__global__ void arrayToData(float *g_odata, uint* keys, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float xc = x + 0.5;
float yc = y + 0.5;
g_odata[offset] = tex2D(inTex, xc, yc).x;
keys[offset] = offset;
}
} |
2f25cbd9d5ccd12954a7320e9e3847395c6d5a4e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "markSegments.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned short *d_mark = NULL;
hipMalloc(&d_mark, XSIZE*YSIZE);
unsigned int circuitGraphEdgeCount = 1;
unsigned int *d_cg_edge_start = NULL;
hipMalloc(&d_cg_edge_start, XSIZE*YSIZE);
unsigned int *d_cedgeCount = NULL;
hipMalloc(&d_cedgeCount, XSIZE*YSIZE);
unsigned int circuitVertexSize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
markSegments), dim3(gridBlock),dim3(threadBlock), 0, 0, d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
markSegments), dim3(gridBlock),dim3(threadBlock), 0, 0, d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
markSegments), dim3(gridBlock),dim3(threadBlock), 0, 0, d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2f25cbd9d5ccd12954a7320e9e3847395c6d5a4e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "markSegments.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned short *d_mark = NULL;
cudaMalloc(&d_mark, XSIZE*YSIZE);
unsigned int circuitGraphEdgeCount = 1;
unsigned int *d_cg_edge_start = NULL;
cudaMalloc(&d_cg_edge_start, XSIZE*YSIZE);
unsigned int *d_cedgeCount = NULL;
cudaMalloc(&d_cedgeCount, XSIZE*YSIZE);
unsigned int circuitVertexSize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
markSegments<<<gridBlock,threadBlock>>>(d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
markSegments<<<gridBlock,threadBlock>>>(d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
markSegments<<<gridBlock,threadBlock>>>(d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c29468cf47d9ac5678fb3667c3423e2df6cd5c45.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaUtil.cuh"
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <algorithm>
#define PYR_DOWN_BLOCK_SIZE 256
namespace cv { namespace cuda { namespace device
{
__device__ inline int3 operator<<(int3 vec, unsigned numShiftBits)
{
int3 ret;
ret.x = vec.x << numShiftBits;
ret.y = vec.y << numShiftBits;
ret.z = vec.z << numShiftBits;
return ret;
}
__device__ inline int3 operator>>(int3 vec, unsigned numShiftBits)
{
int3 ret;
ret.x = vec.x >> numShiftBits;
ret.y = vec.y >> numShiftBits;
ret.z = vec.z >> numShiftBits;
return ret;
}
__device__ inline int3 roundCast8(int3 vec)
{
int3 ret;
ret.x = (vec.x + 128) >> 8;
ret.y = (vec.y + 128) >> 8;
ret.z = (vec.z + 128) >> 8;
return ret;
}
__device__ inline int roundCast8(int val)
{
return (val + 128) >> 8;
}
__device__ inline int3 roundCast6(int3 vec)
{
int3 ret;
ret.x = (vec.x + 32) >> 6;
ret.y = (vec.y + 32) >> 6;
ret.z = (vec.z + 32) >> 6;
return ret;
}
__device__ inline int roundCast6(int val)
{
return (val + 32) >> 6;
}
__global__ void pyrDown32SC3(const PtrStepSz<int3> src, PtrStep<int3> dst,
const BrdColReflect101<int3> rb, const BrdRowReflect101<int3> cb, bool origScale, int dst_cols)
{
__shared__ int3 smem[PYR_DOWN_BLOCK_SIZE + 4];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y;
const int src_y = 2 * y;
if (src_y >= 2 && src_y < src.rows - 2 && x >= 2 && x < src.cols - 2)
{
{
int3 sum;
sum = src(src_y - 2, x);
sum = sum + 4 * src(src_y - 1, x);
sum = sum + 6 * src(src_y, x);
sum = sum + 4 * src(src_y + 1, x);
sum = sum + src(src_y + 2, x);
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int3 sum;
sum = src(src_y - 2, left_x);
sum = sum + 4 * src(src_y - 1, left_x);
sum = sum + 6 * src(src_y, left_x);
sum = sum + 4 * src(src_y + 1, left_x);
sum = sum + src(src_y + 2, left_x);
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int3 sum;
sum = src(src_y - 2, right_x);
sum = sum + 4 * src(src_y - 1, right_x);
sum = sum + 6 * src(src_y, right_x);
sum = sum + 4 * src(src_y + 1, right_x);
sum = sum + src(src_y + 2, right_x);
smem[4 + threadIdx.x] = sum;
}
}
else
{
{
int3 sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(x));
sum = sum + 6 * src(src_y, cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(x));
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int3 sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col(left_x));
sum = sum + 6 * src(src_y, cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col(left_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col(left_x));
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int3 sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(right_x));
sum = sum + 6 * src(src_y, cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(right_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(right_x));
smem[4 + threadIdx.x] = sum;
}
}
__syncthreads();
if (threadIdx.x < PYR_DOWN_BLOCK_SIZE / 2)
{
const int tid2 = threadIdx.x * 2;
int3 sum;
sum = smem[2 + tid2 - 2];
sum = sum + 4 * smem[2 + tid2 - 1];
sum = sum + 6 * smem[2 + tid2 ];
sum = sum + 4 * smem[2 + tid2 + 1];
sum = sum + smem[2 + tid2 + 2];
const int dst_x = (blockIdx.x * blockDim.x + tid2) / 2;
if (dst_x < dst_cols)
dst.ptr(y)[dst_x] = origScale ? roundCast8(sum) : sum;
}
}
__global__ void pyrDown32SC1(const PtrStepSz<int> src, PtrStep<int> dst,
const BrdColReflect101<int> rb, const BrdRowReflect101<int> cb, bool origScale, int dst_cols)
{
__shared__ int smem[PYR_DOWN_BLOCK_SIZE + 4];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y;
const int src_y = 2 * y;
if (src_y >= 2 && src_y < src.rows - 2 && x >= 2 && x < src.cols - 2)
{
{
int sum;
sum = src(src_y - 2, x);
sum = sum + 4 * src(src_y - 1, x);
sum = sum + 6 * src(src_y, x);
sum = sum + 4 * src(src_y + 1, x);
sum = sum + src(src_y + 2, x);
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int sum;
sum = src(src_y - 2, left_x);
sum = sum + 4 * src(src_y - 1, left_x);
sum = sum + 6 * src(src_y, left_x);
sum = sum + 4 * src(src_y + 1, left_x);
sum = sum + src(src_y + 2, left_x);
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int sum;
sum = src(src_y - 2, right_x);
sum = sum + 4 * src(src_y - 1, right_x);
sum = sum + 6 * src(src_y, right_x);
sum = sum + 4 * src(src_y + 1, right_x);
sum = sum + src(src_y + 2, right_x);
smem[4 + threadIdx.x] = sum;
}
}
else
{
{
int sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(x));
sum = sum + 6 * src(src_y, cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(x));
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col(left_x));
sum = sum + 6 * src(src_y, cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col(left_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col(left_x));
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(right_x));
sum = sum + 6 * src(src_y, cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(right_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(right_x));
smem[4 + threadIdx.x] = sum;
}
}
__syncthreads();
if (threadIdx.x < PYR_DOWN_BLOCK_SIZE / 2)
{
const int tid2 = threadIdx.x * 2;
int sum;
sum = smem[2 + tid2 - 2];
sum = sum + 4 * smem[2 + tid2 - 1];
sum = sum + 6 * smem[2 + tid2 ];
sum = sum + 4 * smem[2 + tid2 + 1];
sum = sum + smem[2 + tid2 + 2];
const int dst_x = (blockIdx.x * blockDim.x + tid2) / 2;
if (dst_x < dst_cols)
dst.ptr(y)[dst_x] = origScale ? roundCast8(sum) : sum;
}
}
__global__ void pyrUp32SC3(const PtrStepSz<int3> src, PtrStepSz<int3> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int3 s_srcPatch[10][10];
__shared__ int3 s_dstPatch[20][16];
if (threadIdx.x < 10 && threadIdx.y < 10)
{
int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1;
int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1;
srcx = ::abs(srcx);
srcx = ::min(src.cols - 1, srcx);
srcy = ::abs(srcy);
srcy = ::min(src.rows - 1, srcy);
s_srcPatch[threadIdx.y][threadIdx.x] = src(srcy, srcx);
}
__syncthreads();
int3 sum = VecTraits<int3>::all(0);
const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0);
const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0);
const bool eveny = ((threadIdx.y & 1) == 0);
const int tidx = threadIdx.x;
if (eveny)
{
sum = sum + (evenFlag ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 6) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum;
if (threadIdx.y < 2)
{
sum = VecTraits<int3>::all(0);
if (eveny)
{
sum = sum + (evenFlag ) * s_srcPatch[0][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[0][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 6) * s_srcPatch[0][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[0][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag ) * s_srcPatch[0][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[threadIdx.y][threadIdx.x] = sum;
}
if (threadIdx.y > 13)
{
sum = VecTraits<int3>::all(0);
if (eveny)
{
sum = sum + (evenFlag ) * s_srcPatch[9][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[9][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 6) * s_srcPatch[9][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[9][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag ) * s_srcPatch[9][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum;
}
__syncthreads();
sum = VecTraits<int3>::all(0);
const int tidy = threadIdx.y;
sum = sum + s_dstPatch[2 + tidy - 2][threadIdx.x];
sum = sum + 4 * s_dstPatch[2 + tidy - 1][threadIdx.x];
sum = sum + 6 * s_dstPatch[2 + tidy ][threadIdx.x];
sum = sum + 4 * s_dstPatch[2 + tidy + 1][threadIdx.x];
sum = sum + s_dstPatch[2 + tidy + 2][threadIdx.x];
if (x < dst.cols && y < dst.rows)
dst(y, x) = roundCast6(sum);
}
__global__ void divide(const PtrStepSz<int3> srcImage, const PtrStepSz<int> srcAlpha,
PtrStepSz<int3> dstImage, PtrStepSz<int> dstAlpha)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < srcImage.cols && y < srcImage.rows)
{
int3 srcImageVal = srcImage(y, x);
int srcAlphaVal = srcAlpha(y, x);
if (srcAlphaVal)
{
dstImage(y, x) = ((srcImageVal << 8) - srcImageVal) / srcAlphaVal;
dstAlpha(y, x) = 256;
}
else
{
dstImage(y, x) = make_int3(0, 0, 0);
dstAlpha(y, x) = 0;
}
}
}
__global__ void accumulate(const PtrStepSz<int3> image, const PtrStepSz<int> weight, PtrStepSz<int3> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < image.cols && y < image.rows)
{
dst(y, x) = dst(y, x) + image(y, x) * weight(y, x);
}
}
__global__ void normalize(PtrStepSz<int3> image)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < image.cols && y < image.rows)
{
image(y, x) = roundCast8(image(y, x));
}
}
__global__ void add(const PtrStepSz<int3> a, const PtrStepSz<int3> b, PtrStepSz<int3> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < a.cols && y < a.rows)
{
c(y, x) = a(y, x) + b(y, x);
}
}
__global__ void subtract(const PtrStepSz<int3> a, const PtrStepSz<int3> b, PtrStepSz<int3> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < a.cols && y < a.rows)
{
c(y, x) = a(y, x) - b(y, x);
}
}
__global__ void set(PtrStepSz<int> image, const PtrStepSz<unsigned char> mask, int val)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < image.cols && y < image.rows && mask(y, x))
{
image(y, x) = val;
}
}
}}}
void pyramidDown(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::Size dstSize, bool dstScaleBack)
{
CV_Assert(src.data && (src.type() == CV_32SC3 || src.type() == CV_32SC1));
int type = src.type();
if (dstSize == cv::Size())
{
dstSize.width = ((src.cols + 1) >> 1);
dstSize.height = ((src.rows + 1) >> 1);
}
dst.create(dstSize, type);
const dim3 block(PYR_DOWN_BLOCK_SIZE);
const dim3 grid(cv::cuda::device::divUp(src.cols, block.x), dst.rows);
if (type == CV_32SC3)
{
cv::cuda::device::BrdColReflect101<int3> rb(src.rows);
cv::cuda::device::BrdRowReflect101<int3> cb(src.cols);
hipLaunchKernelGGL(( cv::cuda::device::pyrDown32SC3), dim3(grid), dim3(block), 0, 0, src, dst, rb, cb, dstScaleBack, dst.cols);
}
else
{
cv::cuda::device::BrdColReflect101<int> rb(src.rows);
cv::cuda::device::BrdRowReflect101<int> cb(src.cols);
hipLaunchKernelGGL(( cv::cuda::device::pyrDown32SC1), dim3(grid), dim3(block), 0, 0, src, dst, rb, cb, dstScaleBack, dst.cols);
}
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
void pyramidUp(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::Size dstSize)
{
CV_Assert(src.data && src.type() == CV_32SC3);
if (dstSize == cv::Size())
{
dstSize.width = (src.cols << 1);
dstSize.height = (src.rows << 1);
}
dst.create(dstSize, CV_32SC3);
const dim3 block(16, 16);
const dim3 grid(cv::cuda::device::divUp(dst.cols, block.x), cv::cuda::device::divUp(dst.rows, block.y));
hipLaunchKernelGGL(( cv::cuda::device::pyrUp32SC3), dim3(grid), dim3(block), 0, 0, src, dst);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
void divide(const cv::cuda::GpuMat& srcImage, const cv::cuda::GpuMat& srcAlpha,
cv::cuda::GpuMat& dstImage, cv::cuda::GpuMat& dstAlpha)
{
CV_Assert(srcImage.data && srcImage.type() == CV_32SC3 &&
srcAlpha.data && srcAlpha.type() == CV_32SC1 &&
srcImage.size() == srcAlpha.size());
dstImage.create(srcImage.size(), CV_32SC3);
dstAlpha.create(srcAlpha.size(), CV_32SC1);
const dim3 block(256);
const dim3 grid(cv::cuda::device::divUp(srcImage.cols, block.x), cv::cuda::device::divUp(srcImage.rows, block.y));
hipLaunchKernelGGL(( cv::cuda::device::divide), dim3(grid), dim3(block), 0, 0, srcImage, srcAlpha, dstImage, dstAlpha);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
void accumulate(const cv::cuda::GpuMat& image, const cv::cuda::GpuMat& weight, cv::cuda::GpuMat& dst)
{
CV_Assert(image.data && image.type() == CV_32SC3 &&
weight.data && weight.type() == CV_32SC1 &&
dst.data && dst.type() == CV_32SC3 &&
image.size() == weight.size() && image.size() == dst.size());
const dim3 block(32, 8);
const dim3 grid(cv::cuda::device::divUp(image.cols, block.x), cv::cuda::device::divUp(image.rows, block.y));
hipLaunchKernelGGL(( cv::cuda::device::accumulate), dim3(grid), dim3(block), 0, 0, image, weight, dst);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
void normalize(cv::cuda::GpuMat& image)
{
CV_Assert(image.data && image.type() == CV_32SC3);
const dim3 block(32, 8);
const dim3 grid(cv::cuda::device::divUp(image.cols, block.x), cv::cuda::device::divUp(image.rows, block.y));
hipLaunchKernelGGL(( cv::cuda::device::normalize), dim3(grid), dim3(block), 0, 0, image);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
void add(const cv::cuda::GpuMat& a, const cv::cuda::GpuMat& b, cv::cuda::GpuMat& c)
{
CV_Assert(a.data && a.type() == CV_32SC3
&& b.data && b.type() == CV_32SC3&&
a.size() == b.size());
c.create(a.size(), CV_32SC3);
const dim3 block(32, 8);
const dim3 grid(cv::cuda::device::divUp(a.cols, block.x), cv::cuda::device::divUp(a.rows, block.y));
hipLaunchKernelGGL(( cv::cuda::device::add), dim3(grid), dim3(block), 0, 0, a, b, c);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
void subtract(const cv::cuda::GpuMat& a, const cv::cuda::GpuMat& b, cv::cuda::GpuMat& c)
{
CV_Assert(a.data && a.type() == CV_32SC3
&& b.data && b.type() == CV_32SC3&&
a.size() == b.size());
c.create(a.size(), CV_32SC3);
const dim3 block(32, 8);
const dim3 grid(cv::cuda::device::divUp(a.cols, block.x), cv::cuda::device::divUp(a.rows, block.y));
hipLaunchKernelGGL(( cv::cuda::device::subtract), dim3(grid), dim3(block), 0, 0, a, b, c);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
void set(cv::cuda::GpuMat& image, const cv::cuda::GpuMat& mask, int val)
{
CV_Assert(image.data && image.type() == CV_32SC1 &&
mask.data && mask.type() == CV_8UC1 &&
image.size() == mask.size());
const dim3 block(32, 8);
const dim3 grid(cv::cuda::device::divUp(image.cols, block.x), cv::cuda::device::divUp(image.rows, block.y));
hipLaunchKernelGGL(( cv::cuda::device::set), dim3(grid), dim3(block), 0, 0, image, mask, val);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
} | c29468cf47d9ac5678fb3667c3423e2df6cd5c45.cu | #include "CudaUtil.cuh"
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <algorithm>
#define PYR_DOWN_BLOCK_SIZE 256
namespace cv { namespace cuda { namespace device
{
__device__ inline int3 operator<<(int3 vec, unsigned numShiftBits)
{
int3 ret;
ret.x = vec.x << numShiftBits;
ret.y = vec.y << numShiftBits;
ret.z = vec.z << numShiftBits;
return ret;
}
__device__ inline int3 operator>>(int3 vec, unsigned numShiftBits)
{
int3 ret;
ret.x = vec.x >> numShiftBits;
ret.y = vec.y >> numShiftBits;
ret.z = vec.z >> numShiftBits;
return ret;
}
__device__ inline int3 roundCast8(int3 vec)
{
int3 ret;
ret.x = (vec.x + 128) >> 8;
ret.y = (vec.y + 128) >> 8;
ret.z = (vec.z + 128) >> 8;
return ret;
}
__device__ inline int roundCast8(int val)
{
return (val + 128) >> 8;
}
__device__ inline int3 roundCast6(int3 vec)
{
int3 ret;
ret.x = (vec.x + 32) >> 6;
ret.y = (vec.y + 32) >> 6;
ret.z = (vec.z + 32) >> 6;
return ret;
}
__device__ inline int roundCast6(int val)
{
return (val + 32) >> 6;
}
__global__ void pyrDown32SC3(const PtrStepSz<int3> src, PtrStep<int3> dst,
const BrdColReflect101<int3> rb, const BrdRowReflect101<int3> cb, bool origScale, int dst_cols)
{
__shared__ int3 smem[PYR_DOWN_BLOCK_SIZE + 4];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y;
const int src_y = 2 * y;
if (src_y >= 2 && src_y < src.rows - 2 && x >= 2 && x < src.cols - 2)
{
{
int3 sum;
sum = src(src_y - 2, x);
sum = sum + 4 * src(src_y - 1, x);
sum = sum + 6 * src(src_y, x);
sum = sum + 4 * src(src_y + 1, x);
sum = sum + src(src_y + 2, x);
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int3 sum;
sum = src(src_y - 2, left_x);
sum = sum + 4 * src(src_y - 1, left_x);
sum = sum + 6 * src(src_y, left_x);
sum = sum + 4 * src(src_y + 1, left_x);
sum = sum + src(src_y + 2, left_x);
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int3 sum;
sum = src(src_y - 2, right_x);
sum = sum + 4 * src(src_y - 1, right_x);
sum = sum + 6 * src(src_y, right_x);
sum = sum + 4 * src(src_y + 1, right_x);
sum = sum + src(src_y + 2, right_x);
smem[4 + threadIdx.x] = sum;
}
}
else
{
{
int3 sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(x));
sum = sum + 6 * src(src_y, cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(x));
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int3 sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col(left_x));
sum = sum + 6 * src(src_y, cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col(left_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col(left_x));
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int3 sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(right_x));
sum = sum + 6 * src(src_y, cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(right_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(right_x));
smem[4 + threadIdx.x] = sum;
}
}
__syncthreads();
if (threadIdx.x < PYR_DOWN_BLOCK_SIZE / 2)
{
const int tid2 = threadIdx.x * 2;
int3 sum;
sum = smem[2 + tid2 - 2];
sum = sum + 4 * smem[2 + tid2 - 1];
sum = sum + 6 * smem[2 + tid2 ];
sum = sum + 4 * smem[2 + tid2 + 1];
sum = sum + smem[2 + tid2 + 2];
const int dst_x = (blockIdx.x * blockDim.x + tid2) / 2;
if (dst_x < dst_cols)
dst.ptr(y)[dst_x] = origScale ? roundCast8(sum) : sum;
}
}
__global__ void pyrDown32SC1(const PtrStepSz<int> src, PtrStep<int> dst,
const BrdColReflect101<int> rb, const BrdRowReflect101<int> cb, bool origScale, int dst_cols)
{
__shared__ int smem[PYR_DOWN_BLOCK_SIZE + 4];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y;
const int src_y = 2 * y;
if (src_y >= 2 && src_y < src.rows - 2 && x >= 2 && x < src.cols - 2)
{
{
int sum;
sum = src(src_y - 2, x);
sum = sum + 4 * src(src_y - 1, x);
sum = sum + 6 * src(src_y, x);
sum = sum + 4 * src(src_y + 1, x);
sum = sum + src(src_y + 2, x);
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int sum;
sum = src(src_y - 2, left_x);
sum = sum + 4 * src(src_y - 1, left_x);
sum = sum + 6 * src(src_y, left_x);
sum = sum + 4 * src(src_y + 1, left_x);
sum = sum + src(src_y + 2, left_x);
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int sum;
sum = src(src_y - 2, right_x);
sum = sum + 4 * src(src_y - 1, right_x);
sum = sum + 6 * src(src_y, right_x);
sum = sum + 4 * src(src_y + 1, right_x);
sum = sum + src(src_y + 2, right_x);
smem[4 + threadIdx.x] = sum;
}
}
else
{
{
int sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(x));
sum = sum + 6 * src(src_y, cb.idx_col_high(x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(x));
smem[2 + threadIdx.x] = sum;
}
if (threadIdx.x < 2)
{
const int left_x = x - 2;
int sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col(left_x));
sum = sum + 6 * src(src_y, cb.idx_col(left_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col(left_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col(left_x));
smem[threadIdx.x] = sum;
}
if (threadIdx.x > PYR_DOWN_BLOCK_SIZE - 3)
{
const int right_x = x + 2;
int sum;
sum = src(rb.idx_row_low(src_y - 2), cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_low(src_y - 1), cb.idx_col_high(right_x));
sum = sum + 6 * src(src_y, cb.idx_col_high(right_x));
sum = sum + 4 * src(rb.idx_row_high(src_y + 1), cb.idx_col_high(right_x));
sum = sum + src(rb.idx_row_high(src_y + 2), cb.idx_col_high(right_x));
smem[4 + threadIdx.x] = sum;
}
}
__syncthreads();
if (threadIdx.x < PYR_DOWN_BLOCK_SIZE / 2)
{
const int tid2 = threadIdx.x * 2;
int sum;
sum = smem[2 + tid2 - 2];
sum = sum + 4 * smem[2 + tid2 - 1];
sum = sum + 6 * smem[2 + tid2 ];
sum = sum + 4 * smem[2 + tid2 + 1];
sum = sum + smem[2 + tid2 + 2];
const int dst_x = (blockIdx.x * blockDim.x + tid2) / 2;
if (dst_x < dst_cols)
dst.ptr(y)[dst_x] = origScale ? roundCast8(sum) : sum;
}
}
__global__ void pyrUp32SC3(const PtrStepSz<int3> src, PtrStepSz<int3> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int3 s_srcPatch[10][10];
__shared__ int3 s_dstPatch[20][16];
if (threadIdx.x < 10 && threadIdx.y < 10)
{
int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1;
int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1;
srcx = ::abs(srcx);
srcx = ::min(src.cols - 1, srcx);
srcy = ::abs(srcy);
srcy = ::min(src.rows - 1, srcy);
s_srcPatch[threadIdx.y][threadIdx.x] = src(srcy, srcx);
}
__syncthreads();
int3 sum = VecTraits<int3>::all(0);
const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0);
const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0);
const bool eveny = ((threadIdx.y & 1) == 0);
const int tidx = threadIdx.x;
if (eveny)
{
sum = sum + (evenFlag ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 6) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum;
if (threadIdx.y < 2)
{
sum = VecTraits<int3>::all(0);
if (eveny)
{
sum = sum + (evenFlag ) * s_srcPatch[0][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[0][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 6) * s_srcPatch[0][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[0][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag ) * s_srcPatch[0][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[threadIdx.y][threadIdx.x] = sum;
}
if (threadIdx.y > 13)
{
sum = VecTraits<int3>::all(0);
if (eveny)
{
sum = sum + (evenFlag ) * s_srcPatch[9][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[9][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 6) * s_srcPatch[9][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 4) * s_srcPatch[9][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag ) * s_srcPatch[9][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum;
}
__syncthreads();
sum = VecTraits<int3>::all(0);
const int tidy = threadIdx.y;
sum = sum + s_dstPatch[2 + tidy - 2][threadIdx.x];
sum = sum + 4 * s_dstPatch[2 + tidy - 1][threadIdx.x];
sum = sum + 6 * s_dstPatch[2 + tidy ][threadIdx.x];
sum = sum + 4 * s_dstPatch[2 + tidy + 1][threadIdx.x];
sum = sum + s_dstPatch[2 + tidy + 2][threadIdx.x];
if (x < dst.cols && y < dst.rows)
dst(y, x) = roundCast6(sum);
}
__global__ void divide(const PtrStepSz<int3> srcImage, const PtrStepSz<int> srcAlpha,
PtrStepSz<int3> dstImage, PtrStepSz<int> dstAlpha)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < srcImage.cols && y < srcImage.rows)
{
int3 srcImageVal = srcImage(y, x);
int srcAlphaVal = srcAlpha(y, x);
if (srcAlphaVal)
{
dstImage(y, x) = ((srcImageVal << 8) - srcImageVal) / srcAlphaVal;
dstAlpha(y, x) = 256;
}
else
{
dstImage(y, x) = make_int3(0, 0, 0);
dstAlpha(y, x) = 0;
}
}
}
__global__ void accumulate(const PtrStepSz<int3> image, const PtrStepSz<int> weight, PtrStepSz<int3> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < image.cols && y < image.rows)
{
dst(y, x) = dst(y, x) + image(y, x) * weight(y, x);
}
}
__global__ void normalize(PtrStepSz<int3> image)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < image.cols && y < image.rows)
{
image(y, x) = roundCast8(image(y, x));
}
}
__global__ void add(const PtrStepSz<int3> a, const PtrStepSz<int3> b, PtrStepSz<int3> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < a.cols && y < a.rows)
{
c(y, x) = a(y, x) + b(y, x);
}
}
__global__ void subtract(const PtrStepSz<int3> a, const PtrStepSz<int3> b, PtrStepSz<int3> c)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < a.cols && y < a.rows)
{
c(y, x) = a(y, x) - b(y, x);
}
}
__global__ void set(PtrStepSz<int> image, const PtrStepSz<unsigned char> mask, int val)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < image.cols && y < image.rows && mask(y, x))
{
image(y, x) = val;
}
}
}}}
// Downsamples a 32S image (1 or 3 channels) by one pyramid level using
// reflect-101 borders. When dstSize is the default-constructed Size, the
// destination is half the source resolution, rounded up.
void pyramidDown(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::Size dstSize, bool dstScaleBack)
{
    CV_Assert(src.data && (src.type() == CV_32SC3 || src.type() == CV_32SC1));
    const int type = src.type();
    if (dstSize == cv::Size())
        dstSize = cv::Size((src.cols + 1) >> 1, (src.rows + 1) >> 1);
    dst.create(dstSize, type);
    // 1D thread block; one grid row per destination row.
    const dim3 block(PYR_DOWN_BLOCK_SIZE);
    const dim3 grid(cv::cuda::device::divUp(src.cols, block.x), dst.rows);
    if (type == CV_32SC3)
    {
        cv::cuda::device::BrdColReflect101<int3> rowBorder(src.rows);
        cv::cuda::device::BrdRowReflect101<int3> colBorder(src.cols);
        cv::cuda::device::pyrDown32SC3<<<grid, block>>>(src, dst, rowBorder, colBorder, dstScaleBack, dst.cols);
    }
    else
    {
        cv::cuda::device::BrdColReflect101<int> rowBorder(src.rows);
        cv::cuda::device::BrdRowReflect101<int> colBorder(src.cols);
        cv::cuda::device::pyrDown32SC1<<<grid, block>>>(src, dst, rowBorder, colBorder, dstScaleBack, dst.cols);
    }
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Upsamples a 32SC3 image by one pyramid level. When dstSize is the
// default-constructed Size, the destination is double the source resolution.
void pyramidUp(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::Size dstSize)
{
    CV_Assert(src.data && src.type() == CV_32SC3);
    if (dstSize == cv::Size())
        dstSize = cv::Size(src.cols << 1, src.rows << 1);
    dst.create(dstSize, CV_32SC3);
    const dim3 threads(16, 16);
    const dim3 blocks(cv::cuda::device::divUp(dst.cols, threads.x),
                      cv::cuda::device::divUp(dst.rows, threads.y));
    cv::cuda::device::pyrUp32SC3<<<blocks, threads>>>(src, dst);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Un-premultiplies srcImage by srcAlpha per pixel (the kernel computes
// srcImage * 255 / srcAlpha where alpha is non-zero, else zero) and writes
// the normalized alpha into dstAlpha. Outputs are (re)allocated to match.
void divide(const cv::cuda::GpuMat& srcImage, const cv::cuda::GpuMat& srcAlpha,
    cv::cuda::GpuMat& dstImage, cv::cuda::GpuMat& dstAlpha)
{
    CV_Assert(srcImage.data && srcImage.type() == CV_32SC3 &&
        srcAlpha.data && srcAlpha.type() == CV_32SC1 &&
        srcImage.size() == srcAlpha.size());
    dstImage.create(srcImage.size(), CV_32SC3);
    dstAlpha.create(srcAlpha.size(), CV_32SC1);
    // Use the same 2D launch configuration as the other element-wise wrappers
    // in this file (accumulate/normalize/add/subtract/set). The previous 1D
    // block of 256 threads made grid.y equal to the full row count, which can
    // exceed the 65535 grid-dimension limit on legacy (CC < 3.0) devices for
    // tall images; a 32x8 block avoids that and is consistent file-wide.
    const dim3 block(32, 8);
    const dim3 grid(cv::cuda::device::divUp(srcImage.cols, block.x),
        cv::cuda::device::divUp(srcImage.rows, block.y));
    cv::cuda::device::divide<<<grid, block>>>(srcImage, srcAlpha, dstImage, dstAlpha);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper for the accumulate kernel: dst += image * weight.
// All three mats must already be allocated with matching sizes.
void accumulate(const cv::cuda::GpuMat& image, const cv::cuda::GpuMat& weight, cv::cuda::GpuMat& dst)
{
    CV_Assert(image.data && image.type() == CV_32SC3 &&
        weight.data && weight.type() == CV_32SC1 &&
        dst.data && dst.type() == CV_32SC3 &&
        image.size() == weight.size() && image.size() == dst.size());
    const dim3 threads(32, 8);
    const dim3 blocks(cv::cuda::device::divUp(image.cols, threads.x),
                      cv::cuda::device::divUp(image.rows, threads.y));
    cv::cuda::device::accumulate<<<blocks, threads>>>(image, weight, dst);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper for the normalize kernel: rounds a 32SC3 fixed-point image
// back in place via roundCast8.
void normalize(cv::cuda::GpuMat& image)
{
    CV_Assert(image.data && image.type() == CV_32SC3);
    const dim3 threads(32, 8);
    const dim3 blocks(cv::cuda::device::divUp(image.cols, threads.x),
                      cv::cuda::device::divUp(image.rows, threads.y));
    cv::cuda::device::normalize<<<blocks, threads>>>(image);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper for the add kernel: c = a + b (element-wise, 32SC3).
// `c` is (re)allocated to match `a`.
void add(const cv::cuda::GpuMat& a, const cv::cuda::GpuMat& b, cv::cuda::GpuMat& c)
{
    CV_Assert(a.data && a.type() == CV_32SC3
        && b.data && b.type() == CV_32SC3 &&
        a.size() == b.size());
    c.create(a.size(), CV_32SC3);
    const dim3 threads(32, 8);
    const dim3 blocks(cv::cuda::device::divUp(a.cols, threads.x),
                      cv::cuda::device::divUp(a.rows, threads.y));
    cv::cuda::device::add<<<blocks, threads>>>(a, b, c);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper for the subtract kernel: c = a - b (element-wise, 32SC3).
// `c` is (re)allocated to match `a`.
void subtract(const cv::cuda::GpuMat& a, const cv::cuda::GpuMat& b, cv::cuda::GpuMat& c)
{
    CV_Assert(a.data && a.type() == CV_32SC3
        && b.data && b.type() == CV_32SC3 &&
        a.size() == b.size());
    c.create(a.size(), CV_32SC3);
    const dim3 threads(32, 8);
    const dim3 blocks(cv::cuda::device::divUp(a.cols, threads.x),
                      cv::cuda::device::divUp(a.rows, threads.y));
    cv::cuda::device::subtract<<<blocks, threads>>>(a, b, c);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper for the set kernel: writes `val` into every pixel of `image`
// whose corresponding mask byte is non-zero.
void set(cv::cuda::GpuMat& image, const cv::cuda::GpuMat& mask, int val)
{
    CV_Assert(image.data && image.type() == CV_32SC1 &&
        mask.data && mask.type() == CV_8UC1 &&
        image.size() == mask.size());
    const dim3 threads(32, 8);
    const dim3 blocks(cv::cuda::device::divUp(image.cols, threads.x),
                      cv::cuda::device::divUp(image.rows, threads.y));
    cv::cuda::device::set<<<blocks, threads>>>(image, mask, val);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
8cf10de9dffd1cdb16752dc48380e883a15aed8d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nested_json.hpp"
#include <hash/hash_allocator.cuh>
#include <hash/helper_functions.cuh>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/detail/hashing.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/algorithm.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cuco/static_map.cuh>
#include <hipcub/hipcub.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/fill.h>
#include <thrust/gather.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/remove.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
#include <limits>
namespace cudf::io::json {
namespace detail {
// The node that a token represents
// Maps a parser token to the node category it introduces in the JSON tree.
struct token_to_node {
  __device__ auto operator()(PdaTokenT const token) -> NodeT
  {
    if (token == token_t::StructBegin) { return NC_STRUCT; }
    if (token == token_t::ListBegin) { return NC_LIST; }
    // Values are mapped to NC_STR rather than NC_VAL: type inference and the
    // category collapsing in reduce_to_column_tree take care of values later.
    if (token == token_t::StringBegin or token == token_t::ValueBegin) { return NC_STR; }
    if (token == token_t::FieldNameBegin) { return NC_FN; }
    return NC_ERR;
  }
};
// Convert token indices to node range for each valid node.
// Functor: for token index i, returns the [begin, end) character range of the
// original JSON input that the corresponding node covers.
struct node_ranges {
  // Parser tokens and their character offsets into the original JSON input.
  device_span<PdaTokenT const> tokens;
  device_span<SymbolOffsetT const> token_indices;
  // Whether string values keep their enclosing quote characters in the range.
  bool include_quote_char;
  __device__ auto operator()(size_type i) -> thrust::tuple<SymbolOffsetT, SymbolOffsetT>
  {
    // Whether a token expects to be followed by its respective end-of-* token partner
    auto const is_begin_of_section = [] __device__(PdaTokenT const token) {
      switch (token) {
        case token_t::StringBegin:
        case token_t::ValueBegin:
        case token_t::FieldNameBegin: return true;
        default: return false;
      };
    };
    // The end-of-* partner token for a given beginning-of-* token
    auto const end_of_partner = [] __device__(PdaTokenT const token) {
      switch (token) {
        case token_t::StringBegin: return token_t::StringEnd;
        case token_t::ValueBegin: return token_t::ValueEnd;
        case token_t::FieldNameBegin: return token_t::FieldNameEnd;
        default: return token_t::ErrorBegin;
      };
    };
    // Includes quote char for end-of-string token or Skips the quote char for
    // beginning-of-field-name token
    auto const get_token_index = [include_quote_char = include_quote_char] __device__(
                                   PdaTokenT const token, SymbolOffsetT const token_index) {
      constexpr SymbolOffsetT quote_char_size = 1;
      switch (token) {
        // Strip off quote char included for StringBegin
        case token_t::StringBegin: return token_index + (include_quote_char ? 0 : quote_char_size);
        // Strip off or Include trailing quote char for string values for StringEnd
        case token_t::StringEnd: return token_index + (include_quote_char ? quote_char_size : 0);
        // Strip off quote char included for FieldNameBegin
        case token_t::FieldNameBegin: return token_index + quote_char_size;
        default: return token_index;
      };
    };
    PdaTokenT const token = tokens[i];
    // The section from the original JSON input that this token demarcates
    SymbolOffsetT range_begin = get_token_index(token, token_indices[i]);
    SymbolOffsetT range_end = range_begin + 1;  // non-leaf, non-field nodes ignore this value.
    if (is_begin_of_section(token)) {
      // Only pairs up with its partner when the end token immediately follows;
      // otherwise the default one-character range is kept.
      if ((i + 1) < tokens.size() && end_of_partner(token) == tokens[i + 1]) {
        // Update the range_end for this pair of tokens
        range_end = get_token_index(tokens[i + 1], token_indices[i + 1]);
      }
    }
    return thrust::make_tuple(range_begin, range_end);
  }
};
/**
* @brief Returns stable sorted keys and its sorted order
*
* Uses cub stable radix sort. The order is internally generated, hence it saves a copy and memory.
* Since the key and order is returned, using double buffer helps to avoid extra copy to user
* provided output iterator.
*
* @tparam IndexType sorted order type
* @tparam KeyType key type
* @param keys keys to sort
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return Sorted keys and indices producing that sorted order
*/
// Stable radix sort of `keys`; returns the sorted keys together with the
// permutation (original indices) that produces that order. See the block
// comment above for the rationale behind the double-buffer return scheme.
template <typename IndexType = size_t, typename KeyType>
std::pair<rmm::device_uvector<KeyType>, rmm::device_uvector<IndexType>> stable_sorted_key_order(
  cudf::device_span<KeyType const> keys, rmm::cuda_stream_view stream)
{
  CUDF_FUNC_RANGE();
  // Double-buffer storage: radix sort ping-pongs between the two buffers and
  // reports which one holds the final result via Current().
  rmm::device_uvector<KeyType> keys_buffer1(keys.size(), stream);
  rmm::device_uvector<KeyType> keys_buffer2(keys.size(), stream);
  rmm::device_uvector<IndexType> order_buffer1(keys.size(), stream);
  rmm::device_uvector<IndexType> order_buffer2(keys.size(), stream);
  // This file is HIP-ified: use hipcub:: consistently. The original mixed
  // cub::DoubleBuffer with hipcub::DeviceRadixSort; the cub:: namespace is not
  // guaranteed to be available when compiling under HIP.
  hipcub::DoubleBuffer<IndexType> order_buffer(order_buffer1.data(), order_buffer2.data());
  hipcub::DoubleBuffer<KeyType> keys_buffer(keys_buffer1.data(), keys_buffer2.data());
  // First call only queries the required temporary storage size.
  size_t temp_storage_bytes = 0;
  hipcub::DeviceRadixSort::SortPairs(
    nullptr, temp_storage_bytes, keys_buffer, order_buffer, keys.size());
  rmm::device_buffer d_temp_storage(temp_storage_bytes, stream);
  // Initialize keys and the identity ordering, then sort the pairs.
  thrust::copy(rmm::exec_policy(stream), keys.begin(), keys.end(), keys_buffer1.begin());
  thrust::sequence(rmm::exec_policy(stream), order_buffer1.begin(), order_buffer1.end());
  hipcub::DeviceRadixSort::SortPairs(d_temp_storage.data(),
                                     temp_storage_bytes,
                                     keys_buffer,
                                     order_buffer,
                                     keys.size(),
                                     0,
                                     sizeof(KeyType) * 8,
                                     stream.value());
  // Move out whichever half of each double buffer holds the sorted result.
  return std::pair{keys_buffer.Current() == keys_buffer1.data() ? std::move(keys_buffer1)
                                                                : std::move(keys_buffer2),
                   order_buffer.Current() == order_buffer1.data() ? std::move(order_buffer1)
                                                                  : std::move(order_buffer2)};
}
/**
* @brief Propagate parent node to siblings from first sibling.
*
* @param node_levels Node levels of each node
* @param parent_node_ids parent node ids initialized for first child of each push node,
* and other siblings are initialized to -1.
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
void propagate_parent_to_siblings(cudf::device_span<TreeDepthT const> node_levels,
                                  cudf::device_span<NodeIndexT> parent_node_ids,
                                  rmm::cuda_stream_view stream)
{
  CUDF_FUNC_RANGE();
  // Stable-sort node ids by level so that nodes of the same level become
  // contiguous segments; within a segment, siblings appear in node-id order.
  auto [sorted_node_levels, sorted_order] = stable_sorted_key_order<size_type>(node_levels, stream);
  // instead of gather, using permutation_iterator, which is ~17% faster
  // Segmented inclusive max-scan keyed on level: entries still holding the -1
  // sentinel pick up the parent id written at the first sibling (the sentinel
  // parent_node_sentinel == -1 loses against any real id under maximum).
  // The scan writes through the permutation, i.e. updates in place.
  thrust::inclusive_scan_by_key(
    rmm::exec_policy(stream),
    sorted_node_levels.begin(),
    sorted_node_levels.end(),
    thrust::make_permutation_iterator(parent_node_ids.begin(), sorted_order.begin()),
    thrust::make_permutation_iterator(parent_node_ids.begin(), sorted_order.begin()),
    thrust::equal_to<TreeDepthT>{},
    thrust::maximum<NodeIndexT>{});
}
// Generates a tree representation of the given tokens, token_indices.
// Builds the per-node tree arrays (category, parent node id, level, and input
// character ranges) from the token stream produced by the JSON pushdown
// automaton. All outputs are in node-id order. Fails with CUDF_FAIL if any
// ErrorBegin token is present.
tree_meta_t get_tree_representation(device_span<PdaTokenT const> tokens,
                                    device_span<SymbolOffsetT const> token_indices,
                                    rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  // Whether a token does represent a node in the tree representation
  auto const is_node = [] __device__(PdaTokenT const token) -> bool {
    switch (token) {
      case token_t::StructBegin:
      case token_t::ListBegin:
      case token_t::StringBegin:
      case token_t::ValueBegin:
      case token_t::FieldNameBegin:
      case token_t::ErrorBegin: return true;
      default: return false;
    };
  };
  // Whether the token pops from the parent node stack
  auto const does_pop = [] __device__(PdaTokenT const token) -> bool {
    switch (token) {
      case token_t::StructMemberEnd:
      case token_t::StructEnd:
      case token_t::ListEnd: return true;
      default: return false;
    };
  };
  // Whether the token pushes onto the parent node stack
  auto const does_push = [] __device__(PdaTokenT const token) -> bool {
    switch (token) {
      case token_t::FieldNameBegin:
      case token_t::StructBegin:
      case token_t::ListBegin: return true;
      default: return false;
    };
  };
  // Look for ErrorBegin and report the point of error.
  if (auto const error_count =
        thrust::count(rmm::exec_policy(stream), tokens.begin(), tokens.end(), token_t::ErrorBegin);
      error_count > 0) {
    auto const error_location =
      thrust::find(rmm::exec_policy(stream), tokens.begin(), tokens.end(), token_t::ErrorBegin);
    // Copy the character offset of the first error token to the host for the
    // exception message.
    SymbolOffsetT error_index;
    CUDF_CUDA_TRY(
      hipMemcpyAsync(&error_index,
                     token_indices.data() + thrust::distance(tokens.begin(), error_location),
                     sizeof(SymbolOffsetT),
                     hipMemcpyDefault,
                     stream.value()));
    stream.synchronize();
    CUDF_FAIL("JSON Parser encountered an invalid format at location " +
              std::to_string(error_index));
  }
  auto const num_tokens = tokens.size();
  auto const num_nodes =
    thrust::count_if(rmm::exec_policy(stream), tokens.begin(), tokens.end(), is_node);
  // Node levels: transform_exclusive_scan, copy_if.
  rmm::device_uvector<TreeDepthT> node_levels(num_nodes, stream, mr);
  {
    // Each token contributes +1 (push), -1 (pop), or 0; the exclusive scan of
    // these deltas is the nesting level at each token. Levels are then
    // compacted down to node-producing tokens only.
    rmm::device_uvector<TreeDepthT> token_levels(num_tokens, stream);
    auto const push_pop_it = thrust::make_transform_iterator(
      tokens.begin(), [does_push, does_pop] __device__(PdaTokenT const token) -> size_type {
        return does_push(token) - does_pop(token);
      });
    thrust::exclusive_scan(
      rmm::exec_policy(stream), push_pop_it, push_pop_it + num_tokens, token_levels.begin());
    auto const node_levels_end = cudf::detail::copy_if_safe(token_levels.begin(),
                                                            token_levels.end(),
                                                            tokens.begin(),
                                                            node_levels.begin(),
                                                            is_node,
                                                            stream);
    CUDF_EXPECTS(thrust::distance(node_levels.begin(), node_levels_end) == num_nodes,
                 "node level count mismatch");
  }
  // Node parent ids:
  // previous push node_id transform, stable sort by level, segmented scan with Max, reorder.
  rmm::device_uvector<NodeIndexT> parent_node_ids(num_nodes, stream, mr);
  // This block of code is generalized logical stack algorithm. TODO: make this a separate function.
  {
    // node_token_ids[n] = token index of node n (mapping node ids back to the
    // token stream).
    rmm::device_uvector<NodeIndexT> node_token_ids(num_nodes, stream);
    cudf::detail::copy_if_safe(thrust::make_counting_iterator<NodeIndexT>(0),
                               thrust::make_counting_iterator<NodeIndexT>(0) + num_tokens,
                               tokens.begin(),
                               node_token_ids.begin(),
                               is_node,
                               stream);
    // previous push node_id
    // if previous node is a push, then i-1
    // if previous node is FE, then i-2 (returns FB's index)
    // if previous node is SMB and its previous node is a push, then i-2
    // eg. `{ SMB FB FE VB VE SME` -> `{` index as FB's parent.
    // else -1
    auto const first_childs_parent_token_id = [tokens_gpu =
                                                 tokens.begin()] __device__(auto i) -> NodeIndexT {
      if (i <= 0) { return -1; }
      if (tokens_gpu[i - 1] == token_t::StructBegin or tokens_gpu[i - 1] == token_t::ListBegin) {
        return i - 1;
      } else if (tokens_gpu[i - 1] == token_t::FieldNameEnd) {
        return i - 2;
      } else if (tokens_gpu[i - 1] == token_t::StructMemberBegin and
                 (tokens_gpu[i - 2] == token_t::StructBegin ||
                  tokens_gpu[i - 2] == token_t::ListBegin)) {
        return i - 2;
      } else {
        return -1;
      }
    };
    // Convert each node's parent *token* id to a parent *node* id by binary
    // searching the sorted node_token_ids. Non-first-children stay at the
    // sentinel and are filled in by propagate_parent_to_siblings below.
    thrust::transform(
      rmm::exec_policy(stream),
      node_token_ids.begin(),
      node_token_ids.end(),
      parent_node_ids.begin(),
      [node_ids_gpu = node_token_ids.begin(), num_nodes, first_childs_parent_token_id] __device__(
        NodeIndexT const tid) -> NodeIndexT {
        auto const pid = first_childs_parent_token_id(tid);
        return pid < 0
                 ? parent_node_sentinel
                 : thrust::lower_bound(thrust::seq, node_ids_gpu, node_ids_gpu + num_nodes, pid) -
                     node_ids_gpu;
        // parent_node_sentinel is -1, useful for segmented max operation below
      });
  }
  // Propagate parent node to siblings from first sibling - inplace.
  propagate_parent_to_siblings(
    cudf::device_span<TreeDepthT const>{node_levels.data(), node_levels.size()},
    parent_node_ids,
    stream);
  // Node categories: copy_if with transform.
  rmm::device_uvector<NodeT> node_categories(num_nodes, stream, mr);
  auto const node_categories_it =
    thrust::make_transform_output_iterator(node_categories.begin(), token_to_node{});
  auto const node_categories_end =
    cudf::detail::copy_if_safe(tokens.begin(), tokens.end(), node_categories_it, is_node, stream);
  CUDF_EXPECTS(node_categories_end - node_categories_it == num_nodes,
               "node category count mismatch");
  // Node ranges: copy_if with transform.
  rmm::device_uvector<SymbolOffsetT> node_range_begin(num_nodes, stream, mr);
  rmm::device_uvector<SymbolOffsetT> node_range_end(num_nodes, stream, mr);
  auto const node_range_tuple_it =
    thrust::make_zip_iterator(node_range_begin.begin(), node_range_end.begin());
  // Whether the tokenizer stage should keep quote characters for string values
  // If the tokenizer keeps the quote characters, they may be stripped during type casting
  constexpr bool include_quote_char = true;
  auto const node_range_out_it = thrust::make_transform_output_iterator(
    node_range_tuple_it, node_ranges{tokens, token_indices, include_quote_char});
  auto const node_range_out_end = cudf::detail::copy_if_safe(
    thrust::make_counting_iterator<size_type>(0),
    thrust::make_counting_iterator<size_type>(0) + num_tokens,
    node_range_out_it,
    [is_node, tokens_gpu = tokens.begin()] __device__(size_type i) -> bool {
      return is_node(tokens_gpu[i]);
    },
    stream);
  CUDF_EXPECTS(node_range_out_end - node_range_out_it == num_nodes, "node range count mismatch");
  return {std::move(node_categories),
          std::move(parent_node_ids),
          std::move(node_levels),
          std::move(node_range_begin),
          std::move(node_range_end)};
}
/**
* @brief Generates unique node_type id for each node.
* Field nodes with the same name are assigned the same node_type id.
* List, Struct, and String nodes are assigned their category values as node_type ids.
*
* All inputs and outputs are in node_id order.
* @param d_input JSON string in device memory
* @param d_tree Tree representation of the JSON
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return Vector of node_type ids
*/
rmm::device_uvector<size_type> hash_node_type_with_field_name(device_span<SymbolT const> d_input,
                                                              tree_meta_t const& d_tree,
                                                              rmm::cuda_stream_view stream)
{
  CUDF_FUNC_RANGE();
  using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor<default_allocator<char>>;
  using hash_map_type =
    cuco::static_map<size_type, size_type, cuda::thread_scope_device, hash_table_allocator_type>;
  auto const num_nodes = d_tree.node_categories.size();
  // Only field-name (NC_FN) nodes participate in the map; size it for them.
  auto const num_fields = thrust::count(rmm::exec_policy(stream),
                                        d_tree.node_categories.begin(),
                                        d_tree.node_categories.end(),
                                        node_t::NC_FN);
  constexpr size_type empty_node_index_sentinel = -1;
  hash_map_type key_map{compute_hash_table_size(num_fields, 40),  // 40% occupancy in hash map
                        cuco::empty_key{empty_node_index_sentinel},
                        cuco::empty_value{empty_node_index_sentinel},
                        hash_table_allocator_type{default_allocator<char>{}, stream},
                        stream.value()};
  // Hashes the field-name substring [node_range_begin, node_range_end) of the
  // input for a given node id.
  auto const d_hasher = [d_input          = d_input.data(),
                         node_range_begin = d_tree.node_range_begin.data(),
                         node_range_end   = d_tree.node_range_end.data()] __device__(auto node_id) {
    auto const field_name = cudf::string_view(d_input + node_range_begin[node_id],
                                              node_range_end[node_id] - node_range_begin[node_id]);
    return cudf::detail::default_hash<cudf::string_view>{}(field_name);
  };
  // Two node ids compare equal iff their field-name substrings are equal.
  auto const d_equal = [d_input          = d_input.data(),
                        node_range_begin = d_tree.node_range_begin.data(),
                        node_range_end   = d_tree.node_range_end.data()] __device__(auto node_id1,
                                                                                    auto node_id2) {
    auto const field_name1 = cudf::string_view(
      d_input + node_range_begin[node_id1], node_range_end[node_id1] - node_range_begin[node_id1]);
    auto const field_name2 = cudf::string_view(
      d_input + node_range_begin[node_id2], node_range_end[node_id2] - node_range_begin[node_id2]);
    return field_name1 == field_name2;
  };
  // key-value pairs: uses node_id itself as node_type. (unique node_id for a field name due to
  // hashing)
  auto const iter = cudf::detail::make_counting_transform_iterator(
    0, [] __device__(size_type i) { return cuco::make_pair(i, i); });
  auto const is_field_name_node = [node_categories =
                                     d_tree.node_categories.data()] __device__(auto node_id) {
    return node_categories[node_id] == node_t::NC_FN;
  };
  // Insert only field-name nodes; duplicates (same field name) keep the first
  // inserted node id as the representative value.
  key_map.insert_if(iter,
                    iter + num_nodes,
                    thrust::counting_iterator<size_type>(0),  // stencil
                    is_field_name_node,
                    d_hasher,
                    d_equal,
                    stream.value());
  // Looks up the representative node id for a field name; 0 if not found.
  auto const get_hash_value =
    [key_map = key_map.get_device_view(), d_hasher, d_equal] __device__(auto node_id) -> size_type {
    auto const it = key_map.find(node_id, d_hasher, d_equal);
    return (it == key_map.end()) ? size_type{0} : it->second.load(cuda::std::memory_order_relaxed);
  };
  // convert field nodes to node indices, and other nodes to enum value.
  // Field node types are offset by NUM_NODE_CLASSES so they never collide with
  // the plain category enum values used for non-field nodes.
  rmm::device_uvector<size_type> node_type(num_nodes, stream);
  thrust::tabulate(rmm::exec_policy(stream),
                   node_type.begin(),
                   node_type.end(),
                   [node_categories = d_tree.node_categories.data(),
                    is_field_name_node,
                    get_hash_value] __device__(auto node_id) -> size_type {
                     if (is_field_name_node(node_id))
                       return static_cast<size_type>(NUM_NODE_CLASSES) + get_hash_value(node_id);
                     else
                       return static_cast<size_type>(node_categories[node_id]);
                   });
  return node_type;
}
// Returns, for the array-of-arrays case, the node ids at the row-array
// children level together with each such node's per-row ordinal (its index
// within its parent row array). Both outputs are parallel vectors.
std::pair<rmm::device_uvector<NodeIndexT>, rmm::device_uvector<NodeIndexT>>
get_array_children_indices(TreeDepthT row_array_children_level,
                           device_span<TreeDepthT const> node_levels,
                           device_span<NodeIndexT const> parent_node_ids,
                           rmm::cuda_stream_view stream)
{
  // array children level: (level 2 for values, level 1 for values-JSONLines format)
  // copy nodes id of level 1's children (level 2)
  // exclusive scan by key (on key their parent_node_id, because we need indices in each row.
  // parent_node_id for each row will be same).
  // -> return their indices and their node id
  auto const num_nodes        = node_levels.size();
  auto const num_level2_nodes = thrust::count(
    rmm::exec_policy(stream), node_levels.begin(), node_levels.end(), row_array_children_level);
  rmm::device_uvector<NodeIndexT> level2_nodes(num_level2_nodes, stream);
  rmm::device_uvector<NodeIndexT> level2_indices(num_level2_nodes, stream);
  // Gather node ids at the children level. (The returned end iterator is not
  // needed — the count was computed above — so it is intentionally discarded;
  // the original bound it to an unused local.)
  thrust::copy_if(rmm::exec_policy(stream),
                  thrust::counting_iterator<NodeIndexT>(0),
                  thrust::counting_iterator<NodeIndexT>(num_nodes),
                  node_levels.begin(),
                  level2_nodes.begin(),
                  [row_array_children_level] __device__(auto level) {
                    return level == row_array_children_level;
                  });
  // Per-row ordinal of each child: exclusive scan of ones keyed by parent id
  // (children of the same row array share a parent, so each row restarts at 0).
  auto level2_parent_nodes =
    thrust::make_permutation_iterator(parent_node_ids.begin(), level2_nodes.cbegin());
  thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
                                level2_parent_nodes,
                                level2_parent_nodes + num_level2_nodes,
                                thrust::make_constant_iterator(NodeIndexT{1}),
                                level2_indices.begin());
  return std::make_pair(std::move(level2_nodes), std::move(level2_indices));
}
// Two level hashing algorithm
// 1. Convert node_category+fieldname to node_type. (passed as argument)
// a. Create a hashmap to hash field name and assign unique node id as values.
// b. Convert the node categories to node types.
// Node type is defined as node category enum value if it is not a field node,
// otherwise it is the unique node id assigned by the hashmap (value shifted by #NUM_CATEGORY).
// 2. Set operation on entire path of each node
// a. Create a hash map with hash of {node_level, node_type} of its node and the entire parent
// until root.
// b. While creating hashmap, transform node id to unique node ids that are inserted into the
// hash map. This mimics set operation with hash map. This unique node ids are set ids.
// c. Return this converted set ids, which are the hash map keys/values, and unique set ids.
// Implements step 2 of the two-level hashing described in the block comment
// above: nodes whose entire root-to-node path of {level, node_type} matches
// are mapped to the same "set id". Returns per-node set ids and the list of
// unique set ids.
std::pair<rmm::device_uvector<size_type>, rmm::device_uvector<size_type>> hash_node_path(
  device_span<TreeDepthT const> node_levels,
  device_span<size_type const> node_type,
  device_span<NodeIndexT const> parent_node_ids,
  bool is_array_of_arrays,
  bool is_enabled_lines,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  auto const num_nodes = parent_node_ids.size();
  rmm::device_uvector<size_type> col_id(num_nodes, stream, mr);
  // array of arrays
  NodeIndexT const row_array_children_level = is_enabled_lines ? 1 : 2;
  rmm::device_uvector<size_type> list_indices(0, stream);
  if (is_array_of_arrays) {
    // For array of arrays, level 2 nodes do not have column name (field name).
    // So, we need to generate indices for each level 2 node w.r.t to that row, to uniquely
    // identify each level 2 node as separate column.
    // Example:
    // array of structs: [ { a: 1, b: 2}, { a: 3, b: 4} ]
    // levels:          0 1 2     3 2     3  1 2     3 2     3
    // array of arrays: [ [ 1, 2], [ 3, 4] ]
    // levels:          0 1 2  2   1 2  2
    // For example, in the above example, we need to generate indices for each level 2 node:
    // array of arrays: [ [ 1, 2], [ 3, 4] ]
    // levels:          0 1 2  2   1 2  2
    // child indices:       0  1     0  1
    // These indices uniquely identify each column in each row. This is used during hashing for
    // level 2 nodes to generate unique column ids, instead of field name for level 2 nodes.
    auto [level2_nodes, level2_indices] =
      get_array_children_indices(row_array_children_level, node_levels, parent_node_ids, stream);
    // memory usage could be reduced by using different data structure (hashmap)
    // or alternate method to hash it at node_type
    // Scatter the per-row ordinals into a node-id-indexed vector so the device
    // lambdas below can look them up by node id.
    list_indices.resize(num_nodes, stream);
    thrust::scatter(rmm::exec_policy(stream),
                    level2_indices.cbegin(),
                    level2_indices.cend(),
                    level2_nodes.cbegin(),
                    list_indices.begin());
  }
  using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor<default_allocator<char>>;
  using hash_map_type =
    cuco::static_map<size_type, size_type, cuda::thread_scope_device, hash_table_allocator_type>;
  constexpr size_type empty_node_index_sentinel = -1;
  hash_map_type key_map{compute_hash_table_size(num_nodes),  // TODO reduce oversubscription
                        cuco::empty_key{empty_node_index_sentinel},
                        cuco::empty_value{empty_node_index_sentinel},
                        cuco::erased_key{-2},
                        hash_table_allocator_type{default_allocator<char>{}, stream},
                        stream.value()};
  // path compression is not used since extra writes make all map operations slow.
  // Combines {level, node_type} hashes along the node's whole ancestor chain;
  // for array-of-arrays children the per-row ordinal is mixed in as well.
  auto const d_hasher = [node_level      = node_levels.begin(),
                         node_type       = node_type.begin(),
                         parent_node_ids = parent_node_ids.begin(),
                         list_indices    = list_indices.begin(),
                         is_array_of_arrays,
                         row_array_children_level] __device__(auto node_id) {
    auto hash =
      cudf::detail::hash_combine(cudf::detail::default_hash<TreeDepthT>{}(node_level[node_id]),
                                 cudf::detail::default_hash<size_type>{}(node_type[node_id]));
    node_id = parent_node_ids[node_id];
    // Each node computes its hash by walking from its node up to the root.
    while (node_id != parent_node_sentinel) {
      hash = cudf::detail::hash_combine(
        hash, cudf::detail::default_hash<TreeDepthT>{}(node_level[node_id]));
      hash = cudf::detail::hash_combine(
        hash, cudf::detail::default_hash<size_type>{}(node_type[node_id]));
      if (is_array_of_arrays and node_level[node_id] == row_array_children_level)
        hash = cudf::detail::hash_combine(hash, list_indices[node_id]);
      node_id = parent_node_ids[node_id];
    }
    return hash;
  };
  // Precompute every node's path hash once so equality checks can compare
  // cached hashes before doing the expensive path walk.
  rmm::device_uvector<hash_value_type> node_hash(num_nodes, stream);
  thrust::tabulate(rmm::exec_policy(stream), node_hash.begin(), node_hash.end(), d_hasher);
  auto const d_hashed_cache = [node_hash = node_hash.begin()] __device__(auto node_id) {
    return node_hash[node_id];
  };
  // Path equality: walk both ancestor chains in lockstep while the
  // {level, node_type} (and, for array-of-arrays children, list index) match.
  auto const d_equal = [node_level      = node_levels.begin(),
                        node_type       = node_type.begin(),
                        parent_node_ids = parent_node_ids.begin(),
                        is_array_of_arrays,
                        row_array_children_level,
                        list_indices = list_indices.begin(),
                        d_hashed_cache] __device__(auto node_id1, auto node_id2) {
    if (node_id1 == node_id2) return true;
    if (d_hashed_cache(node_id1) != d_hashed_cache(node_id2)) return false;
    auto const is_equal_level =
      [node_level, node_type, is_array_of_arrays, row_array_children_level, list_indices](
        auto node_id1, auto node_id2) {
        if (node_id1 == node_id2) return true;
        auto const is_level2_equal = [&]() {
          if (!is_array_of_arrays) return true;
          return node_level[node_id1] != row_array_children_level or
                 list_indices[node_id1] == list_indices[node_id2];
        }();
        return node_level[node_id1] == node_level[node_id2] and
               node_type[node_id1] == node_type[node_id2] and is_level2_equal;
      };
    // if both nodes have same node types at all levels, it will check until it has common parent
    // or root.
    while (node_id1 != parent_node_sentinel and node_id2 != parent_node_sentinel and
           node_id1 != node_id2 and is_equal_level(node_id1, node_id2)) {
      node_id1 = parent_node_ids[node_id1];
      node_id2 = parent_node_ids[node_id2];
    }
    return node_id1 == node_id2;
  };
  // insert and convert node ids to unique set ids
  // insert_and_find returns the representative node id for each node's path;
  // it.second is true only for the thread that performed the insertion, so the
  // count of true results is the number of distinct paths (columns).
  auto const num_inserted = thrust::count_if(
    rmm::exec_policy(stream),
    thrust::make_counting_iterator<size_type>(0),
    thrust::make_counting_iterator<size_type>(num_nodes),
    [d_hashed_cache,
     d_equal,
     view       = key_map.get_device_mutable_view(),
     uq_node_id = col_id.begin()] __device__(auto node_id) mutable {
      auto it = view.insert_and_find(cuco::make_pair(node_id, node_id), d_hashed_cache, d_equal);
      uq_node_id[node_id] = (it.first)->first.load(cuda::std::memory_order_relaxed);
      return it.second;
    });
  auto const num_columns = num_inserted;  // key_map.get_size() is not updated.
  rmm::device_uvector<size_type> unique_keys(num_columns, stream);
  key_map.retrieve_all(unique_keys.begin(), thrust::make_discard_iterator(), stream.value());
  return {std::move(col_id), std::move(unique_keys)};
}
/**
* @brief Generates column id and parent column id for each node
*
* 1. Generate col_id:
* a. Set operation on entire path of each node, translate each node id to set id.
* (two level hashing)
* b. gather unique set ids.
* c. sort and use binary search to generate column ids.
* d. Translate parent node ids to parent column ids.
*
* All inputs and outputs are in node_id order.
* @param d_input JSON string in device memory
* @param d_tree Tree representation of the JSON
* @param is_array_of_arrays Whether the tree is an array of arrays
* @param is_enabled_lines Whether the input is a line-delimited JSON
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return column_id, parent_column_id
*/
std::pair<rmm::device_uvector<NodeIndexT>, rmm::device_uvector<NodeIndexT>> generate_column_id(
  device_span<SymbolT const> d_input,
  tree_meta_t const& d_tree,
  bool is_array_of_arrays,
  bool is_enabled_lines,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  auto const num_nodes = d_tree.node_categories.size();
  // Two level hashing:
  //   one for field names -> node_type and,
  //   another for {node_level, node_category} + field hash for the entire path
  //   which is {node_level, node_type} recursively using parent_node_id
  auto [col_id, unique_keys] = [&]() {
    // Convert node_category + field_name to node_type.
    rmm::device_uvector<size_type> node_type =
      hash_node_type_with_field_name(d_input, d_tree, stream);
    // hash entire path from node to root.
    return hash_node_path(d_tree.node_levels,
                          node_type,
                          d_tree.parent_node_ids,
                          is_array_of_arrays,
                          is_enabled_lines,
                          stream,
                          mr);
  }();
  // Compact set ids into dense column ids: sort the unique set ids and replace
  // each node's set id with its rank via binary search (in-place).
  thrust::sort(rmm::exec_policy(stream), unique_keys.begin(), unique_keys.end());
  thrust::lower_bound(rmm::exec_policy(stream),
                      unique_keys.begin(),
                      unique_keys.end(),
                      col_id.begin(),
                      col_id.end(),
                      col_id.begin());
  // Translate each node's parent *node* id into the parent's *column* id;
  // roots keep the parent_node_sentinel.
  rmm::device_uvector<size_type> parent_col_id(num_nodes, stream, mr);
  thrust::transform(rmm::exec_policy(stream),
                    d_tree.parent_node_ids.begin(),
                    d_tree.parent_node_ids.end(),
                    parent_col_id.begin(),
                    [col_id = col_id.begin()] __device__(auto node_id) {
                      return node_id >= 0 ? col_id[node_id] : parent_node_sentinel;
                    });
  return {std::move(col_id), std::move(parent_col_id)};
}
/**
* @brief Computes row indices of each node in the hierarchy.
* 2. Generate row_offset.
* a. Extract only list children
* b. stable_sort by parent_col_id.
 * c. scan_by_key {parent_col_id} (done only on nodes whose parent is a list)
* d. propagate to non-list leaves from parent list node by recursion
*
* pre-condition:
* d_tree.node_categories, d_tree.parent_node_ids, parent_col_id are in order of node_id.
* post-condition: row_offsets is in order of node_id.
* parent_col_id is moved and reused inside this function.
* @param parent_col_id parent node's column id
* @param d_tree Tree representation of the JSON string
* @param is_array_of_arrays Whether the tree is an array of arrays
* @param is_enabled_lines Whether the input is a line-delimited JSON
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return row_offsets
*/
rmm::device_uvector<size_type> compute_row_offsets(rmm::device_uvector<NodeIndexT>&& parent_col_id,
                                                   tree_meta_t const& d_tree,
                                                   bool is_array_of_arrays,
                                                   bool is_enabled_lines,
                                                   rmm::cuda_stream_view stream,
                                                   rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  auto const num_nodes = d_tree.node_categories.size();
  // scatter_indices records each node's original node_id so the results of the
  // compaction + sort below can be written back in node_id order at the end.
  rmm::device_uvector<size_type> scatter_indices(num_nodes, stream);
  thrust::sequence(rmm::exec_policy(stream), scatter_indices.begin(), scatter_indices.end());
  // array of arrays: level of the per-row array node (0 for JSON lines, else 1)
  NodeIndexT const row_array_parent_level = is_enabled_lines ? 0 : 1;
  // condition is true if parent is not a list, or sentinel/root
  // Special case to return true if parent is a list and is_array_of_arrays is true
  auto is_non_list_parent = [node_categories = d_tree.node_categories.begin(),
                             node_levels     = d_tree.node_levels.begin(),
                             is_array_of_arrays,
                             row_array_parent_level] __device__(auto pnid) {
    return !(pnid == parent_node_sentinel ||
             node_categories[pnid] == NC_LIST &&
               (!is_array_of_arrays || node_levels[pnid] != row_array_parent_level));
  };
  // Extract only list children. (nodes whose parent is a list/root)
  // The stencil is the parent node id; surviving pairs keep (parent_col_id, node_id).
  auto const list_parent_end =
    thrust::remove_if(rmm::exec_policy(stream),
                      thrust::make_zip_iterator(parent_col_id.begin(), scatter_indices.begin()),
                      thrust::make_zip_iterator(parent_col_id.end(), scatter_indices.end()),
                      d_tree.parent_node_ids.begin(),
                      is_non_list_parent);
  auto const num_list_parent = thrust::distance(
    thrust::make_zip_iterator(parent_col_id.begin(), scatter_indices.begin()), list_parent_end);
  // Group surviving nodes by parent column id (stable: preserves node order within a column).
  thrust::stable_sort_by_key(rmm::exec_policy(stream),
                             parent_col_id.begin(),
                             parent_col_id.begin() + num_list_parent,
                             scatter_indices.begin());
  rmm::device_uvector<size_type> row_offsets(num_nodes, stream, mr);
  // TODO is it possible to generate list child_offsets too here?
  // write only 1st child offset to parent node id child_offsets?
  // Row index within each column = running count of earlier siblings in that column.
  thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
                                parent_col_id.begin(),
                                parent_col_id.begin() + num_list_parent,
                                thrust::make_constant_iterator<size_type>(1),
                                row_offsets.begin());
  // Using scatter instead of sort: write each row offset back to its original node_id slot.
  auto& temp_storage = parent_col_id;  // reuse parent_col_id as temp storage
  thrust::scatter(rmm::exec_policy(stream),
                  row_offsets.begin(),
                  row_offsets.begin() + num_list_parent,
                  scatter_indices.begin(),
                  temp_storage.begin());
  row_offsets = std::move(temp_storage);
  // Propagate row offsets to non-list leaves from list's immediate children node by recursion
  // (each such node walks up to its nearest ancestor whose parent is a list and copies its offset).
  thrust::transform_if(
    rmm::exec_policy(stream),
    thrust::make_counting_iterator<size_type>(0),
    thrust::make_counting_iterator<size_type>(num_nodes),
    row_offsets.begin(),
    [node_categories = d_tree.node_categories.data(),
     parent_node_ids = d_tree.parent_node_ids.begin(),
     row_offsets     = row_offsets.begin(),
     is_non_list_parent] __device__(size_type node_id) {
      auto parent_node_id = parent_node_ids[node_id];
      // Walk up while the parent is not a list: the ancestor reached last holds
      // the row offset computed in the scan above.
      while (is_non_list_parent(parent_node_id)) {
        node_id        = parent_node_id;
        parent_node_id = parent_node_ids[parent_node_id];
      }
      return row_offsets[node_id];
    },
    // Only rewrite nodes whose own parent is not a list (others already have offsets).
    [node_categories = d_tree.node_categories.data(),
     parent_node_ids = d_tree.parent_node_ids.begin(),
     is_non_list_parent] __device__(size_type node_id) {
      auto const parent_node_id = parent_node_ids[node_id];
      return is_non_list_parent(parent_node_id);
    });
  return row_offsets;
}
// This algorithm assigns a unique column id to each node in the tree.
// The row offset is the row index of the node in that column id.
// Algorithm:
// 1. Generate col_id:
// a. Set operation on entire path of each node, translate each node id to set id.
// b. gather unique set ids.
// c. sort and use binary search to generate column ids.
// d. Translate parent node ids to parent column ids.
// 2. Generate row_offset.
//   a. filter only list children
//   b. stable_sort by parent_col_id.
//   c. scan_by_key {parent_col_id} (done only on nodes whose parent is a list)
//   d. propagate to non-list leaves from parent list node by recursion
std::tuple<rmm::device_uvector<NodeIndexT>, rmm::device_uvector<size_type>>
records_orient_tree_traversal(device_span<SymbolT const> d_input,
                              tree_meta_t const& d_tree,
                              bool is_array_of_arrays,
                              bool is_enabled_lines,
                              rmm::cuda_stream_view stream,
                              rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  // Phase 1: assign a column id and a parent column id to every node.
  auto column_ids =
    generate_column_id(d_input, d_tree, is_array_of_arrays, is_enabled_lines, stream, mr);
  // Phase 2: derive per-column row offsets; the parent-column-id buffer is
  // consumed (moved) and reused internally by compute_row_offsets.
  auto node_row_offsets = compute_row_offsets(
    std::move(column_ids.second), d_tree, is_array_of_arrays, is_enabled_lines, stream, mr);
  return std::tuple{std::move(column_ids.first), std::move(node_row_offsets)};
}
} // namespace detail
} // namespace cudf::io::json
| 8cf10de9dffd1cdb16752dc48380e883a15aed8d.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nested_json.hpp"
#include <hash/hash_allocator.cuh>
#include <hash/helper_functions.cuh>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/detail/hashing.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/algorithm.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cuco/static_map.cuh>
#include <cub/device/device_radix_sort.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/fill.h>
#include <thrust/gather.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/remove.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
#include <limits>
namespace cudf::io::json {
namespace detail {
// The node that a token represents
// Maps a begin-of-* token to the node category it produces in the tree.
struct token_to_node {
  __device__ auto operator()(PdaTokenT const token) -> NodeT
  {
    if (token == token_t::StructBegin) { return NC_STRUCT; }
    if (token == token_t::ListBegin) { return NC_LIST; }
    // Both string and value tokens map to NC_STR: NC_VAL is not emitted here
    // because type inference and reduce_to_column_tree category collapsing
    // take care of distinguishing values later.
    if (token == token_t::StringBegin || token == token_t::ValueBegin) { return NC_STR; }
    if (token == token_t::FieldNameBegin) { return NC_FN; }
    return NC_ERR;
  }
};
// Convert token indices to node range for each valid node.
struct node_ranges {
device_span<PdaTokenT const> tokens;
device_span<SymbolOffsetT const> token_indices;
bool include_quote_char;
__device__ auto operator()(size_type i) -> thrust::tuple<SymbolOffsetT, SymbolOffsetT>
{
// Whether a token expects to be followed by its respective end-of-* token partner
auto const is_begin_of_section = [] __device__(PdaTokenT const token) {
switch (token) {
case token_t::StringBegin:
case token_t::ValueBegin:
case token_t::FieldNameBegin: return true;
default: return false;
};
};
// The end-of-* partner token for a given beginning-of-* token
auto const end_of_partner = [] __device__(PdaTokenT const token) {
switch (token) {
case token_t::StringBegin: return token_t::StringEnd;
case token_t::ValueBegin: return token_t::ValueEnd;
case token_t::FieldNameBegin: return token_t::FieldNameEnd;
default: return token_t::ErrorBegin;
};
};
// Includes quote char for end-of-string token or Skips the quote char for
// beginning-of-field-name token
auto const get_token_index = [include_quote_char = include_quote_char] __device__(
PdaTokenT const token, SymbolOffsetT const token_index) {
constexpr SymbolOffsetT quote_char_size = 1;
switch (token) {
// Strip off quote char included for StringBegin
case token_t::StringBegin: return token_index + (include_quote_char ? 0 : quote_char_size);
// Strip off or Include trailing quote char for string values for StringEnd
case token_t::StringEnd: return token_index + (include_quote_char ? quote_char_size : 0);
// Strip off quote char included for FieldNameBegin
case token_t::FieldNameBegin: return token_index + quote_char_size;
default: return token_index;
};
};
PdaTokenT const token = tokens[i];
// The section from the original JSON input that this token demarcates
SymbolOffsetT range_begin = get_token_index(token, token_indices[i]);
SymbolOffsetT range_end = range_begin + 1; // non-leaf, non-field nodes ignore this value.
if (is_begin_of_section(token)) {
if ((i + 1) < tokens.size() && end_of_partner(token) == tokens[i + 1]) {
// Update the range_end for this pair of tokens
range_end = get_token_index(tokens[i + 1], token_indices[i + 1]);
}
}
return thrust::make_tuple(range_begin, range_end);
}
};
/**
* @brief Returns stable sorted keys and its sorted order
*
* Uses cub stable radix sort. The order is internally generated, hence it saves a copy and memory.
* Since the key and order is returned, using double buffer helps to avoid extra copy to user
* provided output iterator.
*
* @tparam IndexType sorted order type
* @tparam KeyType key type
* @param keys keys to sort
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return Sorted keys and indices producing that sorted order
*/
template <typename IndexType = size_t, typename KeyType>
std::pair<rmm::device_uvector<KeyType>, rmm::device_uvector<IndexType>> stable_sorted_key_order(
  cudf::device_span<KeyType const> keys, rmm::cuda_stream_view stream)
{
  CUDF_FUNC_RANGE();
  // Determine temporary device storage requirements
  // CUB's double-buffer API needs two buffers per sequence; which one ends up
  // holding the result is only known after the sort (queried via Current()).
  rmm::device_uvector<KeyType> keys_buffer1(keys.size(), stream);
  rmm::device_uvector<KeyType> keys_buffer2(keys.size(), stream);
  rmm::device_uvector<IndexType> order_buffer1(keys.size(), stream);
  rmm::device_uvector<IndexType> order_buffer2(keys.size(), stream);
  cub::DoubleBuffer<IndexType> order_buffer(order_buffer1.data(), order_buffer2.data());
  cub::DoubleBuffer<KeyType> keys_buffer(keys_buffer1.data(), keys_buffer2.data());
  size_t temp_storage_bytes = 0;
  // First call with nullptr storage only computes temp_storage_bytes.
  cub::DeviceRadixSort::SortPairs(
    nullptr, temp_storage_bytes, keys_buffer, order_buffer, keys.size());
  rmm::device_buffer d_temp_storage(temp_storage_bytes, stream);

  // Seed the input buffers: a copy of the keys and the identity permutation.
  thrust::copy(rmm::exec_policy(stream), keys.begin(), keys.end(), keys_buffer1.begin());
  thrust::sequence(rmm::exec_policy(stream), order_buffer1.begin(), order_buffer1.end());

  // Stable radix sort over all bits of the key.
  cub::DeviceRadixSort::SortPairs(d_temp_storage.data(),
                                  temp_storage_bytes,
                                  keys_buffer,
                                  order_buffer,
                                  keys.size(),
                                  0,
                                  sizeof(KeyType) * 8,
                                  stream.value());

  // Return whichever buffer of each double buffer holds the sorted output.
  return std::pair{keys_buffer.Current() == keys_buffer1.data() ? std::move(keys_buffer1)
                                                                : std::move(keys_buffer2),
                   order_buffer.Current() == order_buffer1.data() ? std::move(order_buffer1)
                                                                 : std::move(order_buffer2)};
}
/**
* @brief Propagate parent node to siblings from first sibling.
*
* @param node_levels Node levels of each node
* @param parent_node_ids parent node ids initialized for first child of each push node,
* and other siblings are initialized to -1.
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
// Propagates each first sibling's parent node id to its later siblings:
// nodes are viewed in level-sorted order and a segmented running maximum
// (segments = equal levels) overwrites the -1 placeholders in place.
void propagate_parent_to_siblings(cudf::device_span<TreeDepthT const> node_levels,
                                  cudf::device_span<NodeIndexT> parent_node_ids,
                                  rmm::cuda_stream_view stream)
{
  CUDF_FUNC_RANGE();
  auto [levels_sorted, level_order] = stable_sorted_key_order<size_type>(node_levels, stream);

  // View parent ids in level-sorted order without materializing a gather;
  // using a permutation_iterator instead of gather is ~17% faster here.
  auto const permuted_parent_ids =
    thrust::make_permutation_iterator(parent_node_ids.begin(), level_order.begin());
  thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
                                levels_sorted.begin(),
                                levels_sorted.end(),
                                permuted_parent_ids,
                                permuted_parent_ids,
                                thrust::equal_to<TreeDepthT>{},
                                thrust::maximum<NodeIndexT>{});
}
// Generates a tree representation of the given tokens, token_indices.
// Generates a tree representation of the given tokens, token_indices:
// per-node category, parent node id, level, and the [begin, end) character
// range each node covers in the input. Throws on any ErrorBegin token.
tree_meta_t get_tree_representation(device_span<PdaTokenT const> tokens,
                                    device_span<SymbolOffsetT const> token_indices,
                                    rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  // Whether a token does represent a node in the tree representation
  auto const is_node = [] __device__(PdaTokenT const token) -> bool {
    switch (token) {
      case token_t::StructBegin:
      case token_t::ListBegin:
      case token_t::StringBegin:
      case token_t::ValueBegin:
      case token_t::FieldNameBegin:
      case token_t::ErrorBegin: return true;
      default: return false;
    };
  };
  // Whether the token pops from the parent node stack
  auto const does_pop = [] __device__(PdaTokenT const token) -> bool {
    switch (token) {
      case token_t::StructMemberEnd:
      case token_t::StructEnd:
      case token_t::ListEnd: return true;
      default: return false;
    };
  };
  // Whether the token pushes onto the parent node stack
  auto const does_push = [] __device__(PdaTokenT const token) -> bool {
    switch (token) {
      case token_t::FieldNameBegin:
      case token_t::StructBegin:
      case token_t::ListBegin: return true;
      default: return false;
    };
  };
  // Look for ErrorBegin and report the point of error.
  if (auto const error_count =
        thrust::count(rmm::exec_policy(stream), tokens.begin(), tokens.end(), token_t::ErrorBegin);
      error_count > 0) {
    auto const error_location =
      thrust::find(rmm::exec_policy(stream), tokens.begin(), tokens.end(), token_t::ErrorBegin);
    SymbolOffsetT error_index;
    // Copy the offending character offset back to host for the error message.
    CUDF_CUDA_TRY(
      cudaMemcpyAsync(&error_index,
                      token_indices.data() + thrust::distance(tokens.begin(), error_location),
                      sizeof(SymbolOffsetT),
                      cudaMemcpyDefault,
                      stream.value()));
    stream.synchronize();
    CUDF_FAIL("JSON Parser encountered an invalid format at location " +
              std::to_string(error_index));
  }
  auto const num_tokens = tokens.size();
  auto const num_nodes =
    thrust::count_if(rmm::exec_policy(stream), tokens.begin(), tokens.end(), is_node);

  // Node levels: transform_exclusive_scan, copy_if.
  // Level = running sum of (push - pop) over all preceding tokens.
  rmm::device_uvector<TreeDepthT> node_levels(num_nodes, stream, mr);
  {
    rmm::device_uvector<TreeDepthT> token_levels(num_tokens, stream);
    auto const push_pop_it = thrust::make_transform_iterator(
      tokens.begin(), [does_push, does_pop] __device__(PdaTokenT const token) -> size_type {
        return does_push(token) - does_pop(token);
      });
    thrust::exclusive_scan(
      rmm::exec_policy(stream), push_pop_it, push_pop_it + num_tokens, token_levels.begin());

    // Keep only the levels of node-producing tokens.
    auto const node_levels_end = cudf::detail::copy_if_safe(token_levels.begin(),
                                                            token_levels.end(),
                                                            tokens.begin(),
                                                            node_levels.begin(),
                                                            is_node,
                                                            stream);
    CUDF_EXPECTS(thrust::distance(node_levels.begin(), node_levels_end) == num_nodes,
                 "node level count mismatch");
  }

  // Node parent ids:
  // previous push node_id transform, stable sort by level, segmented scan with Max, reorder.
  rmm::device_uvector<NodeIndexT> parent_node_ids(num_nodes, stream, mr);
  // This block of code is generalized logical stack algorithm. TODO: make this a separate function.
  {
    // Token index of each node-producing token.
    rmm::device_uvector<NodeIndexT> node_token_ids(num_nodes, stream);
    cudf::detail::copy_if_safe(thrust::make_counting_iterator<NodeIndexT>(0),
                               thrust::make_counting_iterator<NodeIndexT>(0) + num_tokens,
                               tokens.begin(),
                               node_token_ids.begin(),
                               is_node,
                               stream);

    // previous push node_id
    // if previous node is a push, then i-1
    // if previous node is FE, then i-2 (returns FB's index)
    // if previous node is SMB and its previous node is a push, then i-2
    // eg. `{ SMB FB FE VB VE SME` -> `{` index as FB's parent.
    // else -1
    auto const first_childs_parent_token_id = [tokens_gpu =
                                                 tokens.begin()] __device__(auto i) -> NodeIndexT {
      if (i <= 0) { return -1; }
      if (tokens_gpu[i - 1] == token_t::StructBegin or tokens_gpu[i - 1] == token_t::ListBegin) {
        return i - 1;
      } else if (tokens_gpu[i - 1] == token_t::FieldNameEnd) {
        return i - 2;
      } else if (tokens_gpu[i - 1] == token_t::StructMemberBegin and
                 (tokens_gpu[i - 2] == token_t::StructBegin ||
                  tokens_gpu[i - 2] == token_t::ListBegin)) {
        return i - 2;
      } else {
        return -1;
      }
    };

    // Translate the parent's token id into a node id via binary search over
    // the (sorted) node_token_ids. Only first children get a real parent here;
    // the rest are -1 and are filled in by propagate_parent_to_siblings below.
    thrust::transform(
      rmm::exec_policy(stream),
      node_token_ids.begin(),
      node_token_ids.end(),
      parent_node_ids.begin(),
      [node_ids_gpu = node_token_ids.begin(), num_nodes, first_childs_parent_token_id] __device__(
        NodeIndexT const tid) -> NodeIndexT {
        auto const pid = first_childs_parent_token_id(tid);
        return pid < 0
                 ? parent_node_sentinel
                 : thrust::lower_bound(thrust::seq, node_ids_gpu, node_ids_gpu + num_nodes, pid) -
                     node_ids_gpu;
        // parent_node_sentinel is -1, useful for segmented max operation below
      });
  }
  // Propagate parent node to siblings from first sibling - inplace.
  propagate_parent_to_siblings(
    cudf::device_span<TreeDepthT const>{node_levels.data(), node_levels.size()},
    parent_node_ids,
    stream);

  // Node categories: copy_if with transform.
  rmm::device_uvector<NodeT> node_categories(num_nodes, stream, mr);
  auto const node_categories_it =
    thrust::make_transform_output_iterator(node_categories.begin(), token_to_node{});
  auto const node_categories_end =
    cudf::detail::copy_if_safe(tokens.begin(), tokens.end(), node_categories_it, is_node, stream);
  CUDF_EXPECTS(node_categories_end - node_categories_it == num_nodes,
               "node category count mismatch");

  // Node ranges: copy_if with transform.
  rmm::device_uvector<SymbolOffsetT> node_range_begin(num_nodes, stream, mr);
  rmm::device_uvector<SymbolOffsetT> node_range_end(num_nodes, stream, mr);
  auto const node_range_tuple_it =
    thrust::make_zip_iterator(node_range_begin.begin(), node_range_end.begin());
  // Whether the tokenizer stage should keep quote characters for string values
  // If the tokenizer keeps the quote characters, they may be stripped during type casting
  constexpr bool include_quote_char = true;
  auto const node_range_out_it      = thrust::make_transform_output_iterator(
    node_range_tuple_it, node_ranges{tokens, token_indices, include_quote_char});

  auto const node_range_out_end = cudf::detail::copy_if_safe(
    thrust::make_counting_iterator<size_type>(0),
    thrust::make_counting_iterator<size_type>(0) + num_tokens,
    node_range_out_it,
    [is_node, tokens_gpu = tokens.begin()] __device__(size_type i) -> bool {
      return is_node(tokens_gpu[i]);
    },
    stream);
  CUDF_EXPECTS(node_range_out_end - node_range_out_it == num_nodes, "node range count mismatch");

  return {std::move(node_categories),
          std::move(parent_node_ids),
          std::move(node_levels),
          std::move(node_range_begin),
          std::move(node_range_end)};
}
/**
* @brief Generates unique node_type id for each node.
* Field nodes with the same name are assigned the same node_type id.
* List, Struct, and String nodes are assigned their category values as node_type ids.
*
* All inputs and outputs are in node_id order.
* @param d_input JSON string in device memory
* @param d_tree Tree representation of the JSON
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return Vector of node_type ids
*/
rmm::device_uvector<size_type> hash_node_type_with_field_name(device_span<SymbolT const> d_input,
                                                              tree_meta_t const& d_tree,
                                                              rmm::cuda_stream_view stream)
{
  CUDF_FUNC_RANGE();
  using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor<default_allocator<char>>;
  using hash_map_type =
    cuco::static_map<size_type, size_type, cuda::thread_scope_device, hash_table_allocator_type>;

  auto const num_nodes  = d_tree.node_categories.size();
  auto const num_fields = thrust::count(rmm::exec_policy(stream),
                                        d_tree.node_categories.begin(),
                                        d_tree.node_categories.end(),
                                        node_t::NC_FN);

  constexpr size_type empty_node_index_sentinel = -1;
  hash_map_type key_map{compute_hash_table_size(num_fields, 40),  // 40% occupancy in hash map
                        cuco::empty_key{empty_node_index_sentinel},
                        cuco::empty_value{empty_node_index_sentinel},
                        hash_table_allocator_type{default_allocator<char>{}, stream},
                        stream.value()};
  // Hashes the field-name substring of the input that a node's range covers.
  auto const d_hasher = [d_input          = d_input.data(),
                         node_range_begin = d_tree.node_range_begin.data(),
                         node_range_end   = d_tree.node_range_end.data()] __device__(auto node_id) {
    auto const field_name = cudf::string_view(d_input + node_range_begin[node_id],
                                              node_range_end[node_id] - node_range_begin[node_id]);
    return cudf::detail::default_hash<cudf::string_view>{}(field_name);
  };
  // Two nodes compare equal iff their field-name substrings match exactly.
  auto const d_equal = [d_input          = d_input.data(),
                        node_range_begin = d_tree.node_range_begin.data(),
                        node_range_end   = d_tree.node_range_end.data()] __device__(auto node_id1,
                                                                                    auto node_id2) {
    auto const field_name1 = cudf::string_view(
      d_input + node_range_begin[node_id1], node_range_end[node_id1] - node_range_begin[node_id1]);
    auto const field_name2 = cudf::string_view(
      d_input + node_range_begin[node_id2], node_range_end[node_id2] - node_range_begin[node_id2]);
    return field_name1 == field_name2;
  };
  // key-value pairs: uses node_id itself as node_type. (unique node_id for a field name due to
  // hashing)
  auto const iter = cudf::detail::make_counting_transform_iterator(
    0, [] __device__(size_type i) { return cuco::make_pair(i, i); });

  auto const is_field_name_node = [node_categories =
                                     d_tree.node_categories.data()] __device__(auto node_id) {
    return node_categories[node_id] == node_t::NC_FN;
  };
  // Only field-name nodes are inserted; the first node with a given field name wins.
  key_map.insert_if(iter,
                    iter + num_nodes,
                    thrust::counting_iterator<size_type>(0),  // stencil
                    is_field_name_node,
                    d_hasher,
                    d_equal,
                    stream.value());
  // Looks up the canonical node id stored for a node's field name (0 if absent).
  auto const get_hash_value =
    [key_map = key_map.get_device_view(), d_hasher, d_equal] __device__(auto node_id) -> size_type {
    auto const it = key_map.find(node_id, d_hasher, d_equal);
    return (it == key_map.end()) ? size_type{0} : it->second.load(cuda::std::memory_order_relaxed);
  };

  // convert field nodes to node indices, and other nodes to enum value.
  // Field node types are offset by NUM_NODE_CLASSES so they never collide with
  // the plain category enum values used for non-field nodes.
  rmm::device_uvector<size_type> node_type(num_nodes, stream);
  thrust::tabulate(rmm::exec_policy(stream),
                   node_type.begin(),
                   node_type.end(),
                   [node_categories = d_tree.node_categories.data(),
                    is_field_name_node,
                    get_hash_value] __device__(auto node_id) -> size_type {
                     if (is_field_name_node(node_id))
                       return static_cast<size_type>(NUM_NODE_CLASSES) + get_hash_value(node_id);
                     else
                       return static_cast<size_type>(node_categories[node_id]);
                   });
  return node_type;
}
/**
 * @brief Collects the node ids at the row-array children level and computes each
 * such node's ordinal index within its parent row.
 *
 * @param row_array_children_level Level of a row array's children
 *        (2 for regular JSON values, 1 for the JSON Lines format)
 * @param node_levels Level of each node, in node_id order
 * @param parent_node_ids Parent node id of each node, in node_id order
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return Pair of (node ids at that level, index of each of those nodes within its row)
 */
std::pair<rmm::device_uvector<NodeIndexT>, rmm::device_uvector<NodeIndexT>>
get_array_children_indices(TreeDepthT row_array_children_level,
                           device_span<TreeDepthT const> node_levels,
                           device_span<NodeIndexT const> parent_node_ids,
                           rmm::cuda_stream_view stream)
{
  // array children level: (level 2 for values, level 1 for values-JSONLines format)
  // copy nodes id of level 1's children (level 2)
  // exclusive scan by key (on key their parent_node_id, because we need indices in each row.
  // parent_node_id for each row will be same).
  // -> return their indices and their node id
  auto const num_nodes  = node_levels.size();
  auto num_level2_nodes = thrust::count(
    rmm::exec_policy(stream), node_levels.begin(), node_levels.end(), row_array_children_level);
  rmm::device_uvector<NodeIndexT> level2_nodes(num_level2_nodes, stream);
  rmm::device_uvector<NodeIndexT> level2_indices(num_level2_nodes, stream);
  // Gather the node ids whose level matches; the returned iterator is not needed
  // since num_level2_nodes was computed with the same predicate above.
  thrust::copy_if(rmm::exec_policy(stream),
                  thrust::counting_iterator<NodeIndexT>(0),
                  thrust::counting_iterator<NodeIndexT>(num_nodes),
                  node_levels.begin(),
                  level2_nodes.begin(),
                  [row_array_children_level] __device__(auto level) {
                    return level == row_array_children_level;
                  });
  // Siblings share a parent node id, so an exclusive scan keyed on the parent id
  // numbers each row's children 0, 1, 2, ...
  auto level2_parent_nodes =
    thrust::make_permutation_iterator(parent_node_ids.begin(), level2_nodes.cbegin());
  thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
                                level2_parent_nodes,
                                level2_parent_nodes + num_level2_nodes,
                                thrust::make_constant_iterator(NodeIndexT{1}),
                                level2_indices.begin());
  return std::make_pair(std::move(level2_nodes), std::move(level2_indices));
}
// Two level hashing algorithm
// 1. Convert node_category+fieldname to node_type. (passed as argument)
// a. Create a hashmap to hash field name and assign unique node id as values.
// b. Convert the node categories to node types.
// Node type is defined as node category enum value if it is not a field node,
// otherwise it is the unique node id assigned by the hashmap (value shifted by #NUM_CATEGORY).
// 2. Set operation on entire path of each node
// a. Create a hash map with hash of {node_level, node_type} of its node and the entire parent
// until root.
// b. While creating hashmap, transform node id to unique node ids that are inserted into the
// hash map. This mimics set operation with hash map. This unique node ids are set ids.
// c. Return this converted set ids, which are the hash map keys/values, and unique set ids.
std::pair<rmm::device_uvector<size_type>, rmm::device_uvector<size_type>> hash_node_path(
  device_span<TreeDepthT const> node_levels,
  device_span<size_type const> node_type,
  device_span<NodeIndexT const> parent_node_ids,
  bool is_array_of_arrays,
  bool is_enabled_lines,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  auto const num_nodes = parent_node_ids.size();
  // Output: one "set id" per node; nodes with identical root-to-node paths share a set id.
  rmm::device_uvector<size_type> col_id(num_nodes, stream, mr);

  // array of arrays: level of a row array's children (1 for JSON lines, else 2)
  NodeIndexT const row_array_children_level = is_enabled_lines ? 1 : 2;
  rmm::device_uvector<size_type> list_indices(0, stream);
  if (is_array_of_arrays) {
    // For array of arrays, level 2 nodes do not have column name (field name).
    // So, we need to generate indices for each level 2 node w.r.t to that row, to uniquely
    // identify each level 2 node as separate column.
    // Example:
    // array of structs: [ { a: 1, b: 2}, { a: 3, b: 4} ]
    //            levels: 0 1 2  3  2  3  1 2  3  2  3
    // array of arrays:  [ [ 1, 2], [ 3, 4] ]
    //            levels: 0 1 2  2  1 2  2
    // For example, in the above example, we need to generate indices for each level 2 node:
    // array of arrays:  [ [ 1, 2], [ 3, 4] ]
    //            levels: 0 1 2  2  1 2  2
    //     child indices:     0  1     0  1
    // These indices uniquely identify each column in each row. This is used during hashing for
    // level 2 nodes to generate unique column ids, instead of field name for level 2 nodes.
    auto [level2_nodes, level2_indices] =
      get_array_children_indices(row_array_children_level, node_levels, parent_node_ids, stream);
    // memory usage could be reduced by using different data structure (hashmap)
    // or alternate method to hash it at node_type
    // Expand to a dense node_id-indexed array so the hasher can look up by node id.
    list_indices.resize(num_nodes, stream);
    thrust::scatter(rmm::exec_policy(stream),
                    level2_indices.cbegin(),
                    level2_indices.cend(),
                    level2_nodes.cbegin(),
                    list_indices.begin());
  }

  using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor<default_allocator<char>>;
  using hash_map_type =
    cuco::static_map<size_type, size_type, cuda::thread_scope_device, hash_table_allocator_type>;

  constexpr size_type empty_node_index_sentinel = -1;
  hash_map_type key_map{compute_hash_table_size(num_nodes),  // TODO reduce oversubscription
                        cuco::empty_key{empty_node_index_sentinel},
                        cuco::empty_value{empty_node_index_sentinel},
                        cuco::erased_key{-2},
                        hash_table_allocator_type{default_allocator<char>{}, stream},
                        stream.value()};
  // path compression is not used since extra writes make all map operations slow.
  // Hash of the entire path: combine {level, type} of the node and of every ancestor up to root.
  // For array-of-arrays children, the per-row child index is mixed in as well.
  auto const d_hasher = [node_level      = node_levels.begin(),
                         node_type       = node_type.begin(),
                         parent_node_ids = parent_node_ids.begin(),
                         list_indices    = list_indices.begin(),
                         is_array_of_arrays,
                         row_array_children_level] __device__(auto node_id) {
    auto hash =
      cudf::detail::hash_combine(cudf::detail::default_hash<TreeDepthT>{}(node_level[node_id]),
                                 cudf::detail::default_hash<size_type>{}(node_type[node_id]));
    node_id = parent_node_ids[node_id];

    // Each node computes its hash by walking from its node up to the root.
    while (node_id != parent_node_sentinel) {
      hash = cudf::detail::hash_combine(
        hash, cudf::detail::default_hash<TreeDepthT>{}(node_level[node_id]));
      hash = cudf::detail::hash_combine(
        hash, cudf::detail::default_hash<size_type>{}(node_type[node_id]));
      if (is_array_of_arrays and node_level[node_id] == row_array_children_level)
        hash = cudf::detail::hash_combine(hash, list_indices[node_id]);
      node_id = parent_node_ids[node_id];
    }
    return hash;
  };

  // Precompute every node's path hash once; the map's probing then reads from this cache
  // instead of re-walking the tree on each probe.
  rmm::device_uvector<hash_value_type> node_hash(num_nodes, stream);
  thrust::tabulate(rmm::exec_policy(stream), node_hash.begin(), node_hash.end(), d_hasher);
  auto const d_hashed_cache = [node_hash = node_hash.begin()] __device__(auto node_id) {
    return node_hash[node_id];
  };

  // Deep path equality: quick rejects on cached hash, then compare {level, type}
  // (and list index for array-of-arrays children) at every ancestor level.
  auto const d_equal = [node_level      = node_levels.begin(),
                        node_type       = node_type.begin(),
                        parent_node_ids = parent_node_ids.begin(),
                        is_array_of_arrays,
                        row_array_children_level,
                        list_indices = list_indices.begin(),
                        d_hashed_cache] __device__(auto node_id1, auto node_id2) {
    if (node_id1 == node_id2) return true;
    if (d_hashed_cache(node_id1) != d_hashed_cache(node_id2)) return false;
    auto const is_equal_level =
      [node_level, node_type, is_array_of_arrays, row_array_children_level, list_indices](
        auto node_id1, auto node_id2) {
        if (node_id1 == node_id2) return true;
        auto const is_level2_equal = [&]() {
          if (!is_array_of_arrays) return true;
          return node_level[node_id1] != row_array_children_level or
                 list_indices[node_id1] == list_indices[node_id2];
        }();
        return node_level[node_id1] == node_level[node_id2] and
               node_type[node_id1] == node_type[node_id2] and is_level2_equal;
      };
    // if both nodes have same node types at all levels, it will check until it has common parent
    // or root.
    while (node_id1 != parent_node_sentinel and node_id2 != parent_node_sentinel and
           node_id1 != node_id2 and is_equal_level(node_id1, node_id2)) {
      node_id1 = parent_node_ids[node_id1];
      node_id2 = parent_node_ids[node_id2];
    }
    return node_id1 == node_id2;
  };

  // insert and convert node ids to unique set ids
  // insert_and_find returns the winning (first-inserted) node id for each distinct path,
  // which becomes the node's set id; `it.second` is true only for the winner.
  auto const num_inserted = thrust::count_if(
    rmm::exec_policy(stream),
    thrust::make_counting_iterator<size_type>(0),
    thrust::make_counting_iterator<size_type>(num_nodes),
    [d_hashed_cache,
     d_equal,
     view       = key_map.get_device_mutable_view(),
     uq_node_id = col_id.begin()] __device__(auto node_id) mutable {
      auto it = view.insert_and_find(cuco::make_pair(node_id, node_id), d_hashed_cache, d_equal);
      uq_node_id[node_id] = (it.first)->first.load(cuda::std::memory_order_relaxed);
      return it.second;
    });

  auto const num_columns = num_inserted;  // key_map.get_size() is not updated.
  rmm::device_uvector<size_type> unique_keys(num_columns, stream);
  key_map.retrieve_all(unique_keys.begin(), thrust::make_discard_iterator(), stream.value());

  return {std::move(col_id), std::move(unique_keys)};
}
/**
* @brief Generates column id and parent column id for each node
*
* 1. Generate col_id:
* a. Set operation on entire path of each node, translate each node id to set id.
* (two level hashing)
* b. gather unique set ids.
* c. sort and use binary search to generate column ids.
* d. Translate parent node ids to parent column ids.
*
* All inputs and outputs are in node_id order.
* @param d_input JSON string in device memory
* @param d_tree Tree representation of the JSON
* @param is_array_of_arrays Whether the tree is an array of arrays
* @param is_enabled_lines Whether the input is a line-delimited JSON
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return column_id, parent_column_id
*/
std::pair<rmm::device_uvector<NodeIndexT>, rmm::device_uvector<NodeIndexT>> generate_column_id(
  device_span<SymbolT const> d_input,
  tree_meta_t const& d_tree,
  bool is_array_of_arrays,
  bool is_enabled_lines,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  auto const num_nodes = d_tree.node_categories.size();
  // Two level hashing:
  // one for field names -> node_type and,
  // another for {node_level, node_category} + field hash for the entire path
  // which is {node_level, node_type} recursively using parent_node_id
  // col_id: per-node set id (a representative node id of its equivalence class);
  // unique_keys: one representative node id per distinct column.
  auto [col_id, unique_keys] = [&]() {
    // Convert node_category + field_name to node_type.
    rmm::device_uvector<size_type> node_type =
      hash_node_type_with_field_name(d_input, d_tree, stream);
    // hash entire path from node to root.
    return hash_node_path(d_tree.node_levels,
                          node_type,
                          d_tree.parent_node_ids,
                          is_array_of_arrays,
                          is_enabled_lines,
                          stream,
                          mr);
  }();
  // Densify: sort the unique set ids, then replace each node's set id by its
  // rank (binary search) to obtain contiguous column ids in node_id order.
  thrust::sort(rmm::exec_policy(stream), unique_keys.begin(), unique_keys.end());
  thrust::lower_bound(rmm::exec_policy(stream),
                      unique_keys.begin(),
                      unique_keys.end(),
                      col_id.begin(),
                      col_id.end(),
                      col_id.begin());
  // Translate each parent node id into the parent's column id; root nodes
  // (negative parent id) map to parent_node_sentinel.
  rmm::device_uvector<size_type> parent_col_id(num_nodes, stream, mr);
  thrust::transform(rmm::exec_policy(stream),
                    d_tree.parent_node_ids.begin(),
                    d_tree.parent_node_ids.end(),
                    parent_col_id.begin(),
                    [col_id = col_id.begin()] __device__(auto node_id) {
                      return node_id >= 0 ? col_id[node_id] : parent_node_sentinel;
                    });
  return {std::move(col_id), std::move(parent_col_id)};
}
/**
* @brief Computes row indices of each node in the hierarchy.
* 2. Generate row_offset.
* a. Extract only list children
* b. stable_sort by parent_col_id.
* c. scan_by_key {parent_col_id} (done only on nodes who's parent is list)
* d. propagate to non-list leaves from parent list node by recursion
*
* pre-condition:
* d_tree.node_categories, d_tree.parent_node_ids, parent_col_id are in order of node_id.
* post-condition: row_offsets is in order of node_id.
* parent_col_id is moved and reused inside this function.
* @param parent_col_id parent node's column id
* @param d_tree Tree representation of the JSON string
* @param is_array_of_arrays Whether the tree is an array of arrays
* @param is_enabled_lines Whether the input is a line-delimited JSON
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return row_offsets
*/
rmm::device_uvector<size_type> compute_row_offsets(rmm::device_uvector<NodeIndexT>&& parent_col_id,
                                                   tree_meta_t const& d_tree,
                                                   bool is_array_of_arrays,
                                                   bool is_enabled_lines,
                                                   rmm::cuda_stream_view stream,
                                                   rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  auto const num_nodes = d_tree.node_categories.size();
  // scatter_indices remembers each surviving node's original node_id so the
  // scanned offsets can be scattered back into node_id order afterwards.
  rmm::device_uvector<size_type> scatter_indices(num_nodes, stream);
  thrust::sequence(rmm::exec_policy(stream), scatter_indices.begin(), scatter_indices.end());
  // array of arrays
  NodeIndexT const row_array_parent_level = is_enabled_lines ? 0 : 1;
  // condition is true if parent is not a list, or sentinel/root
  // Special case to return true if parent is a list and is_array_of_arrays is true
  // (note: && binds tighter than ||, so the inner condition reads
  //  "sentinel OR (parent is list AND (not array-of-arrays OR parent not at row-array level))")
  auto is_non_list_parent = [node_categories = d_tree.node_categories.begin(),
                             node_levels = d_tree.node_levels.begin(),
                             is_array_of_arrays,
                             row_array_parent_level] __device__(auto pnid) {
    return !(pnid == parent_node_sentinel ||
             node_categories[pnid] == NC_LIST &&
               (!is_array_of_arrays || node_levels[pnid] != row_array_parent_level));
  };
  // Extract only list children. (nodes who's parent is a list/root)
  auto const list_parent_end =
    thrust::remove_if(rmm::exec_policy(stream),
                      thrust::make_zip_iterator(parent_col_id.begin(), scatter_indices.begin()),
                      thrust::make_zip_iterator(parent_col_id.end(), scatter_indices.end()),
                      d_tree.parent_node_ids.begin(),
                      is_non_list_parent);
  auto const num_list_parent = thrust::distance(
    thrust::make_zip_iterator(parent_col_id.begin(), scatter_indices.begin()), list_parent_end);
  // Group the surviving nodes by their parent column id (stable, so siblings
  // keep their original relative order -> row index equals sibling rank).
  thrust::stable_sort_by_key(rmm::exec_policy(stream),
                             parent_col_id.begin(),
                             parent_col_id.begin() + num_list_parent,
                             scatter_indices.begin());
  rmm::device_uvector<size_type> row_offsets(num_nodes, stream, mr);
  // TODO is it possible to generate list child_offsets too here?
  // write only 1st child offset to parent node id child_offsets?
  // Per-column 0-based enumeration: each segment of equal parent_col_id gets 0,1,2,...
  thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
                                parent_col_id.begin(),
                                parent_col_id.begin() + num_list_parent,
                                thrust::make_constant_iterator<size_type>(1),
                                row_offsets.begin());
  // Using scatter instead of sort.
  // NOTE: parent_col_id is intentionally consumed here as scratch space;
  // scatter puts the scanned offsets back into node_id order.
  auto& temp_storage = parent_col_id;  // reuse parent_col_id as temp storage
  thrust::scatter(rmm::exec_policy(stream),
                  row_offsets.begin(),
                  row_offsets.begin() + num_list_parent,
                  scatter_indices.begin(),
                  temp_storage.begin());
  row_offsets = std::move(temp_storage);
  // Propagate row offsets to non-list leaves from list's immediate children node by recursion
  // (walk up the parent chain until an ancestor whose parent IS a list is found,
  //  and take that ancestor's row offset).
  thrust::transform_if(
    rmm::exec_policy(stream),
    thrust::make_counting_iterator<size_type>(0),
    thrust::make_counting_iterator<size_type>(num_nodes),
    row_offsets.begin(),
    [node_categories = d_tree.node_categories.data(),
     parent_node_ids = d_tree.parent_node_ids.begin(),
     row_offsets = row_offsets.begin(),
     is_non_list_parent] __device__(size_type node_id) {
      auto parent_node_id = parent_node_ids[node_id];
      while (is_non_list_parent(parent_node_id)) {
        node_id = parent_node_id;
        parent_node_id = parent_node_ids[parent_node_id];
      }
      return row_offsets[node_id];
    },
    // predicate: only rewrite offsets of nodes whose parent is NOT a list
    [node_categories = d_tree.node_categories.data(),
     parent_node_ids = d_tree.parent_node_ids.begin(),
     is_non_list_parent] __device__(size_type node_id) {
      auto const parent_node_id = parent_node_ids[node_id];
      return is_non_list_parent(parent_node_id);
    });
  return row_offsets;
}
// This algorithm assigns a unique column id to each node in the tree.
// The row offset is the row index of the node in that column id.
// Algorithm:
// 1. Generate col_id:
// a. Set operation on entire path of each node, translate each node id to set id.
// b. gather unique set ids.
// c. sort and use binary search to generate column ids.
// d. Translate parent node ids to parent column ids.
// 2. Generate row_offset.
// a. filter only list children
// a. stable_sort by parent_col_id.
// b. scan_by_key {parent_col_id} (done only on nodes whose parent is a list)
// c. propagate to non-list leaves from parent list node by recursion
std::tuple<rmm::device_uvector<NodeIndexT>, rmm::device_uvector<size_type>>
records_orient_tree_traversal(device_span<SymbolT const> d_input,
                              tree_meta_t const& d_tree,
                              bool is_array_of_arrays,
                              bool is_enabled_lines,
                              rmm::cuda_stream_view stream,
                              rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  // Step 1: assign every node a column id, plus its parent's column id.
  auto ids =
    generate_column_id(d_input, d_tree, is_array_of_arrays, is_enabled_lines, stream, mr);
  // Step 2: derive per-node row offsets; the parent-column-id buffer is
  // consumed (reused as scratch) by compute_row_offsets.
  auto offsets = compute_row_offsets(
    std::move(ids.second), d_tree, is_array_of_arrays, is_enabled_lines, stream, mr);
  return std::make_tuple(std::move(ids.first), std::move(offsets));
}
} // namespace detail
} // namespace cudf::io::json
|
302ec428054d69d8b788fc31395f532004e53d82.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file simulation.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
 * , which is conducted under the supervision of prof. dr hab. inż. Marek
 * Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#define BLOCK_TILE_LOAD_V4 1
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <limits>
#ifdef RD_USE_OPENMP
#include <omp.h>
#endif
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include "rd/gpu/device/brute_force/simulation.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/gpu/util/dev_static_for.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/memory.h"
#include "rd/utils/name_traits.hpp"
#include "tests/test_util.hpp"
#include "cub/test_util.h"
#include "rd/utils/rd_params.hpp"
//------------------------------------------------------------
// GLOBAL CONSTANTS / VARIABLES
//------------------------------------------------------------
// Suffix appended to the date/device-name prefix of the timings log file.
static const std::string LOG_FILE_NAME_SUFFIX = "gpu_brute_force_timings.txt";
// Performance log stream; allocated in initializeLogFile() only when logging is enabled.
std::ofstream * g_logFile = nullptr;
// --log flag: write performance results to the timings file.
bool g_logPerfResults = false;
// --rGraphs flag: draw result graphs (only for dim <= 3).
bool g_drawResultsGraph = false;
// Name of the selected CUDA/HIP device (used in log and graph file names).
std::string g_devName;
// Number of timed repetitions per configuration (1 for profiling/debug/quick builds).
#if defined(RD_PROFILE) || defined(RD_DEBUG) || defined(QUICK_TEST)
static constexpr int g_iterations = 1;
#else
static constexpr int g_iterations = 5;
#endif
// --d flag: ordinal of the GPU to run on.
static int g_devId = 0;
// Range of point dimensionalities exercised by the dimension-sweep tests.
#ifdef QUICK_TEST
static constexpr int MAX_TEST_DIM = 4;
#else
static constexpr int MIN_TEST_DIM = 2;
static constexpr int MAX_TEST_DIM = 12;
#endif
// Upper bound on the generated point-cloud size for the size-sweep tests.
static constexpr int MAX_POINTS_NUM = int(1e7);
//------------------------------------------------------------
// Utils
//------------------------------------------------------------
/**
* @brief Create if necessary and open log file. Allocate log file stream.
*/
template <typename T>
static void initializeLogFile()
{
    // No-op unless performance logging was requested on the command line.
    if (g_logPerfResults)
    {
        // Log file name: <date>_<device-name>_<suffix>, opened for appending.
        std::ostringstream logFileName;
        logFileName << getCurrDate() << "_" <<
            g_devName << "_" << LOG_FILE_NAME_SUFFIX;
        std::string logFilePath = rd::findPath("timings/", logFileName.str());
        g_logFile = new std::ofstream(logFilePath.c_str(), std::ios::out | std::ios::app);
        if (g_logFile->fail())
        {
            // FIX: free the stream and reset the global before throwing; the
            // original leaked the ofstream and left g_logFile dangling non-null,
            // which later code (benchmark checks `g_logFile != nullptr`) would
            // mistake for an open log.
            delete g_logFile;
            g_logFile = nullptr;
            throw std::logic_error("Couldn't open file: " + logFileName.str());
        }
        // Section header identifying the data type being benchmarked.
        *g_logFile << "%" << rd::HLINE << std::endl;
        *g_logFile << "% " << typeid(T).name() << std::endl;
        *g_logFile << "%" << rd::HLINE << std::endl;
        // legend
        *g_logFile << "% ";
        logValue(*g_logFile, "inPointsNum", 11);
        logValue(*g_logFile, "dim", 10);
        logValue(*g_logFile, "r1", 10);
        logValue(*g_logFile, "r2", 10);
        logValue(*g_logFile, "inMemLayout", 11);
        logValue(*g_logFile, "outMemLayout", 12);
        logValue(*g_logFile, "chosenPtsNum", 12);
        logValue(*g_logFile, "avgCpuTime", 10);
        logValue(*g_logFile, "minCpuTime", 10);
        logValue(*g_logFile, "maxCpuTime", 10);
        logValue(*g_logFile, "hausdorffDist", 13);
        logValue(*g_logFile, "medianDist", 10);
        logValue(*g_logFile, "avgDist", 10);
        logValue(*g_logFile, "minDist", 10);
        logValue(*g_logFile, "maxDist", 10);
        *g_logFile << "\n";
        g_logFile->flush();
    }
}
//------------------------------------------------------------
// INVOKE AND MEASURE
//------------------------------------------------------------
/**
 * @brief Runs GPU brute-force ridge detection g_iterations times and reports
 * timing and quality statistics (averaged over iterations) to stdout and,
 * when enabled, to the performance log file.
 *
 * @tparam DIM               compile-time point dimensionality
 * @tparam IN_MEMORY_LAYOUT  device-side layout of the input samples
 * @tparam OUT_MEMORY_LAYOUT device-side layout of the chosen-points result
 * @param dataPack        input samples and algorithm parameters; its `ns`
 *                        field is updated with the number of chosen points
 * @param qualityMeasure  computes Hausdorff/distance statistics of the result
 * @param verbose         forwarded to the RidgeDetection object
 */
template <
    int DIM,
    rd::DataMemoryLayout IN_MEMORY_LAYOUT,
    rd::DataMemoryLayout OUT_MEMORY_LAYOUT,
    typename T,
    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void benchmark(
    RdData<T> & dataPack,
    rd::RDAssessmentQuality<T> * qualityMeasure,
    bool verbose = false)
{
    using namespace rd::gpu;
    // verbose = true;
    bruteForce::RidgeDetection<T, DIM, IN_MEMORY_LAYOUT, OUT_MEMORY_LAYOUT> rdGpu(
        dataPack.np,
        dataPack.r1,
        dataPack.r2,
        verbose);
    // copy and if necessary transpose input data to gpu device
    if (IN_MEMORY_LAYOUT == rd::ROW_MAJOR)
    {
        rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
            rdGpu.dP_, dataPack.P, DIM, dataPack.np, DIM, DIM);
    }
    else
    {
        // host data is row-major; transpose into the device's pitched column-major buffer
        rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
            rdGpu.dP_, dataPack.P, DIM, dataPack.np, rdGpu.pPitch_, DIM * sizeof(T));
    }
    checkCudaErrors(hipDeviceSynchronize());
    rd::CpuTimer timer;
    // statistics accumulated over g_iterations runs, averaged afterwards
    T medianDist = 0, avgDist = 0, minDist = 0, maxDist = 0, hausdorffDist = 0;
    float minCpuTime = std::numeric_limits<float>::max();
    float maxCpuTime = std::numeric_limits<float>::lowest();
    std::vector<T> chosenPoints;
    float testAvgCpuTime = 0.f;
    for (int k = 0; k < g_iterations; ++k)
    {
        chosenPoints.clear();
        // time only the detection itself; post-processing is timed separately
        timer.start();
        rdGpu.ridgeDetection();
        timer.stop();
        float currTime = timer.elapsedMillis(0);
        testAvgCpuTime += currTime;
        minCpuTime = min(currTime, minCpuTime);
        maxCpuTime = max(currTime, maxCpuTime);
        rd::CpuTimer qmesTimer, postprcsTimer;
        postprcsTimer.start();
        rdGpu.getChosenSamplesCount();
        dataPack.ns = rdGpu.ns_;
        chosenPoints.resize(rdGpu.ns_ * dataPack.dim);
        // copy back to host results
        if (OUT_MEMORY_LAYOUT == rd::ROW_MAJOR)
        {
            rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, hipMemcpyDeviceToHost>(
                chosenPoints.data(), rdGpu.dS_, DIM, rdGpu.ns_, DIM, DIM);
        }
        else
        {
            rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, hipMemcpyDeviceToHost>(
                chosenPoints.data(), rdGpu.dS_, rdGpu.ns_, DIM, DIM * sizeof(T),
                rdGpu.sPitch_);
        }
        // chosenPoints.resize(rdGpu.ns_ * dataPack.dim);
        // measure assessment quality
        qmesTimer.start();
        hausdorffDist += qualityMeasure->hausdorffDistance(chosenPoints);
        T median, avg, min, max;
        qualityMeasure->setDistanceStats(chosenPoints, median, avg, min, max);
        avgDist += avg;
        medianDist += median;
        minDist += min;
        maxDist += max;
        qmesTimer.stop();
        postprcsTimer.stop();
        std::cout << "postprocess (quality measure): " << qmesTimer.elapsedMillis(0) << "ms"
            << "\tpostprocess (all): " << postprcsTimer.elapsedMillis(0) << "ms"
            << "\tcomputation cpu time: " << timer.elapsedMillis(0) << "ms" << std::endl;
    }
    // optional gnuplot visualisation of input vs. chosen points (2D/3D only)
    if (dataPack.dim <= 3 && g_drawResultsGraph)
    {
        rd::GraphDrawer<T> gDrawer;
        std::ostringstream graphName;
        graphName << typeid(T).name() << "_" << getCurrDateAndTime() << "_"
            << g_devName
            << "_" << rd::DataMemoryLayoutNameTraits<IN_MEMORY_LAYOUT>::shortName
            << "_" << rd::DataMemoryLayoutNameTraits<OUT_MEMORY_LAYOUT>::shortName
            << "_np-" << dataPack.np
            << "_r1-" << dataPack.r1
            << "_r2-" << dataPack.r2
            << "_a-" << dataPack.a
            << "_b-" << dataPack.b
            << "_s-" << dataPack.s
            << "_result";
        std::string filePath = rd::findPath("img/", graphName.str());
        gDrawer.startGraph(filePath, dataPack.dim);
        if (dataPack.dim == 3)
        {
            gDrawer.setGraph3DConf();
        }
        gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#B8E186' ps 0.5 ",
            dataPack.P, rd::GraphDrawer<T>::POINTS, dataPack.np);
        gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#D73027' ps 1.3 ",
            chosenPoints.data(), rd::GraphDrawer<T>::POINTS, dataPack.ns);
        gDrawer.endGraph();
    }
    // average accumulated statistics over all iterations
    testAvgCpuTime /= g_iterations;
    hausdorffDist /= g_iterations;
    medianDist /= g_iterations;
    avgDist /= g_iterations;
    minDist /= g_iterations;
    maxDist /= g_iterations;
    if (g_logFile != nullptr)
    {
        logValues(*g_logFile,
            dataPack.np,
            DIM,
            dataPack.r1,
            dataPack.r2,
            std::string(rd::DataMemoryLayoutNameTraits<IN_MEMORY_LAYOUT>::shortName),
            std::string(rd::DataMemoryLayoutNameTraits<OUT_MEMORY_LAYOUT>::shortName),
            dataPack.ns,
            testAvgCpuTime,
            minCpuTime,
            maxCpuTime,
            hausdorffDist,
            medianDist,
            avgDist,
            minDist,
            maxDist);
        *g_logFile << "\n";
        g_logFile->flush();
    }
    logValues(std::cout,
        dataPack.np,
        DIM,
        dataPack.r1,
        dataPack.r2,
        std::string(rd::DataMemoryLayoutNameTraits<IN_MEMORY_LAYOUT>::name),
        std::string(rd::DataMemoryLayoutNameTraits<OUT_MEMORY_LAYOUT>::name),
        dataPack.ns,
        testAvgCpuTime,
        minCpuTime,
        maxCpuTime,
        hausdorffDist,
        medianDist,
        avgDist,
        minDist,
        maxDist);
    std::cout << std::endl;
}
//------------------------------------------------------------
// Test generation
//------------------------------------------------------------
// Benchmarks the same data set under every supported combination of
// input/output device memory layouts (quick builds test only COL/COL).
template <
    int DIM,
    typename T,
    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testMemLayout(
    RdData<T> & dataPack,
    rd::RDAssessmentQuality<T> * qualityMeasure)
{
    benchmark<DIM, rd::COL_MAJOR, rd::COL_MAJOR>(dataPack, qualityMeasure);
#ifndef QUICK_TEST
    benchmark<DIM, rd::COL_MAJOR, rd::ROW_MAJOR>(dataPack, qualityMeasure);
    benchmark<DIM, rd::ROW_MAJOR, rd::ROW_MAJOR>(dataPack, qualityMeasure);
#endif
}
/**
* @brief Test detection time & quality relative to algorithm parameter values
*/
template <
    int DIM,
    typename T,
    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testRDParams(
    int pointCnt,
    int dim,
    PointCloud<T> const & pc,
    std::vector<T> && points,
    rd::RDAssessmentQuality<T> * qualityMeasure)
{
#ifdef QUICK_TEST
    // FIX: the body below reads `r`, but this branch previously declared only
    // `r1`/`r2`, so QUICK_TEST builds failed with an undeclared identifier.
    // Single radius; r2 is derived as r * 2 in the body, same as before.
    T r = 1.7f * pc.stddev_;
#else
    // Sweep of r1 candidates, expressed as multiples of the cloud's stddev.
    std::vector<T> r1Vals{0.1f, 0.2f, 0.5f, 1.0f, 1.2f, 1.5f, 1.8f, 2.0f, 3.f, 4.f, 5.f, 10.f};
    // std::vector<T> r1Vals{2.0f};
    for (T& val : r1Vals)
    {
        val *= pc.stddev_;
    }
    for (T r : r1Vals)
#endif
    {
        // Package the samples and parameters for this (r1, r2) pair and run
        // the full memory-layout benchmark matrix.
        RdData<T> dataPack;
        dataPack.dim = dim;
        dataPack.np = pointCnt;
        dataPack.r1 = r;
        dataPack.r2 = r * 2.f;
        dataPack.s = pc.stddev_;
        dataPack.P = points.data();
        pc.getCloudParameters(dataPack.a, dataPack.b);
        testMemLayout<DIM>(dataPack, qualityMeasure);
    }
    // Ownership of the quality measurer is transferred to this function.
    delete qualityMeasure;
}
/**
* @brief helper structure for static for loop over dimension
*/
struct IterateDimensions
{
    // Static-for loop body: runs the parameter-sweep tests for one dimension.
    // @param idx      compile-time dimension wrapper (D::value) / runtime value
    // @param pointCnt number of points to extract from the cloud
    // @param pc       source point cloud (must have dim_ >= idx)
    template <typename D, typename T>
    static void impl(
        D idx,
        int pointCnt,
        PointCloud<T> const & pc)
    {
        std::cout << rd::HLINE << std::endl;
        std::cout << ">>>> Dimension: " << idx << "D\n";
        if (g_logPerfResults)
        {
            // Record cloud parameters for this dimension in the log.
            T a, b;
            pc.getCloudParameters(a, b);
            *g_logFile << "%>>>> Dimension: " << idx << "D\n"
                << "% a: " << a << " b: " << b << " s: " << pc.stddev_
                << " pointsNum: " << pointCnt << "\n";
        }
        // extractPart yields a pointCnt x idx slice; the quality measurer is
        // owned (and deleted) by testRDParams.
        testRDParams<D::value>(pointCnt, idx, pc, pc.extractPart(pointCnt, idx),
            pc.getQualityMeasurer(idx));
    }
};
/**
* @brief Test detection time & quality relative to point dimension
*/
template <
    int DIM,
    typename T>
struct TestDimensions
{
    // Runs the parameter-sweep tests for the single, fixed dimension DIM.
    // Opens the log file (when enabled) and closes it afterwards.
    static void impl(
        PointCloud<T> & pc,
        int pointCnt)
    {
        static_assert(DIM != 0, "DIM equal to zero!\n");
        initializeLogFile<T>();
        if (pc.dim_ < DIM)
        {
            throw std::runtime_error("Input file data dimensionality"
                " is lower than requested!");
        }
        std::cout << rd::HLINE << std::endl;
        std::cout << ">>>> Dimension: " << DIM << "D\n";
        if (g_logPerfResults)
        {
            T a, b;
            pc.getCloudParameters(a, b);
            *g_logFile << "%>>>> Dimension: " << DIM << "D\n"
                << "% a: " << a << " b: " << b << " s: " << pc.stddev_
                << " pointsNum: " << pointCnt << "\n";
        }
        testRDParams<DIM>(pointCnt, DIM, pc, pc.extractPart(pointCnt, DIM),
            pc.getQualityMeasurer(DIM));
        // clean-up
        if (g_logPerfResults)
        {
            g_logFile->close();
            delete g_logFile;
            // FIX: reset to nullptr so repeated calls (testSize invokes impl in
            // a loop) never observe a dangling, non-null g_logFile.
            g_logFile = nullptr;
        }
    }
};
template <typename T>
struct TestDimensions<0, T>
{
    // Specialization for DIM == 0: sweeps every dimension in
    // [MIN_TEST_DIM, MAX_TEST_DIM] via the static-for helper.
    static void impl(
        PointCloud<T> & pc,
        int pointCnt)
    {
        initializeLogFile<T>();
        if (pc.dim_ < MAX_TEST_DIM)
        {
            throw std::runtime_error("Input file data dimensionality"
                " is lower than requested!");
        }
        StaticFor<MIN_TEST_DIM, MAX_TEST_DIM, IterateDimensions>::impl(pointCnt, pc);
        // clean-up
        if (g_logPerfResults)
        {
            g_logFile->close();
            delete g_logFile;
            // FIX: reset to nullptr so repeated calls (testSize invokes impl in
            // a loop) never observe a dangling, non-null g_logFile.
            g_logFile = nullptr;
        }
    }
};
/**
* @brief Test detection time & quality relative to number of points
*/
// Runs the dimension tests for one fixed size (pointCnt > 0) or sweeps sizes
// 1e3 .. MAX_POINTS_NUM in a 1x/2x/5x-per-decade pattern (pointCnt <= 0).
// When readFromFile is false, the cloud is (re)generated to the maximum size
// once up front and smaller tests use slices of it.
template <
    typename T,
    int DIM = 0,
    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testSize(
    PointCloud<T> & pc,
    int pointCnt = -1,
    bool readFromFile = false)
{
    if (pointCnt > 0)
    {
        if (!readFromFile)
        {
            pc.pointCnt_ = pointCnt;
            pc.dim_ = (DIM == 0) ? MAX_TEST_DIM : DIM;
            pc.initializeData();
        }
        TestDimensions<DIM, T>::impl(pc, pointCnt);
    }
    else
    {
        if (!readFromFile)
        {
            // Generate the largest cloud once; each test run extracts a part of it.
            pc.pointCnt_ = MAX_POINTS_NUM;
            pc.dim_ = (DIM == 0) ? MAX_TEST_DIM : DIM;
            pc.initializeData();
        }
        // Size sweep: k, 2k, 5k for each decade k = 1e3, 1e4, ...
        for (int k = 1e3; k <= MAX_POINTS_NUM; k *= 10)
        {
            std::cout << "\n//------------------------------------------"
                << "\n//\t\t pointCnt: " << k
                << "\n//------------------------------------------\n";
            TestDimensions<DIM, T>::impl(pc, k);
            if (k == MAX_POINTS_NUM) break;
            std::cout << "\n//------------------------------------------"
                << "\n//\t\t pointCnt: " << k*2
                << "\n//------------------------------------------\n";
            TestDimensions<DIM, T>::impl(pc, k*2);
            std::cout << "\n//------------------------------------------"
                << "\n//\t\t pointCnt: " << k*5
                << "\n//------------------------------------------\n";
            TestDimensions<DIM, T>::impl(pc, k*5);
        }
    }
}
//------------------------------------------------------------
// MAIN
//------------------------------------------------------------
// Entry point: parses command-line options, selects the GPU, and dispatches
// the benchmark suite (quick single-dimension run, or full float/double
// spiral + segment sweeps depending on build flags).
int main(int argc, char const **argv)
{
    float a = -1.f, b = -1.f, stddev = -1.f, segLength = -1.f;
    int pointCnt = -1;
    std::string inFilePath = "";
    int inFileDataDim = 0;
    bool loadDataFromFile = false;
    rd::CommandLineArgs args(argc, argv);
    if (args.CheckCmdLineFlag("help"))
    {
        printf("%s \n"
            "\t\t[--log <log performance results>]\n"
            "\t\t[--rGraphs <draws resuls graphs (if dim <= 3)>]\n"
            "\t\t[--a <a parameter of spiral>]\n"
            "\t\t[--b <b parameter of spiral>]\n"
            "\t\t[--segl <generated N-dimensional segment length>]\n"
            "\t\t[--stddev <standard deviation of generated samples>]\n"
            "\t\t[--size <number of points>]\n"
            "\t\t[--d=<device id>]\n"
            "\t\t[--f=<relative to binary, input data file path>]\n"
            "\t\t[--fd=<data dimensonality>]\n"
            "\n", argv[0]);
        exit(0);
    }
    if (args.CheckCmdLineFlag("log"))
    {
        g_logPerfResults = true;
    }
    if (args.CheckCmdLineFlag("a"))
    {
        args.GetCmdLineArgument("a", a);
    }
    if (args.CheckCmdLineFlag("b"))
    {
        args.GetCmdLineArgument("b", b);
    }
    if (args.CheckCmdLineFlag("segl"))
    {
        args.GetCmdLineArgument("segl", segLength);
    }
    if (args.CheckCmdLineFlag("stddev"))
    {
        args.GetCmdLineArgument("stddev", stddev);
    }
    if (args.CheckCmdLineFlag("size"))
    {
        args.GetCmdLineArgument("size", pointCnt);
    }
    if (args.CheckCmdLineFlag("d"))
    {
        args.GetCmdLineArgument("d", g_devId);
    }
    if (args.CheckCmdLineFlag("f"))
    {
        args.GetCmdLineArgument("f", inFilePath);
        loadDataFromFile = true;
    }
    if (args.CheckCmdLineFlag("fd"))
    {
        args.GetCmdLineArgument("fd", inFileDataDim);
    }
    if (args.CheckCmdLineFlag("rGraphs"))
    {
        g_drawResultsGraph = true;
    }
    checkCudaErrors(deviceInit(g_devId));
    // set device name for logging and drawing purposes
    hipDeviceProp_t devProp;
    checkCudaErrors(hipGetDeviceProperties(&devProp, g_devId));
    g_devName = devProp.name;
    // All generation parameters are mandatory (defaults are sentinels).
    if (pointCnt < 0 ||
        segLength < 0 ||
        a < 0 ||
        b < 0 ||
        stddev < 0)
    {
        std::cout << "Have to specify parameters! Rerun with --help for help.\n";
        exit(1);
    }
#ifdef QUICK_TEST
    const int dim = 5;
    std::cout << "\n//------------------------------------------"
        << "\n//\t\t float: "
        << "\n//------------------------------------------\n";
    // PointCloud<float> && fpc = SpiralPointCloud<float>(a, b, pointCnt, dim, stddev);
    // TestDimensions<dim, float>::impl(fpc, pointCnt);
    PointCloud<float> && fpc = SegmentPointCloud<float>(segLength, pointCnt, dim, stddev);
    TestDimensions<dim, float>::impl(fpc, pointCnt);
    std::cout << "\n//------------------------------------------"
        << "\n//\t\t double: "
        << "\n//------------------------------------------\n";
    // PointCloud<double> && dpc = SpiralPointCloud<double>(a, b, pointCnt, 2, stddev);
    // TestDimensions<2, double>::impl(dpc, pointCnt);
    PointCloud<double> && dpc = SegmentPointCloud<double>(segLength, pointCnt, dim, stddev);
    TestDimensions<dim, double>::impl(dpc, pointCnt);
#else
    #ifndef RD_DOUBLE_PRECISION
    // --size=1000000 --segl=1457.75 --a=22.52 --b=11.31 --stddev=4.17 --rGraphs --log --d=0
    std::cout << "\n//------------------------------------------"
        << "\n//\t\t (spiral) float: "
        << "\n//------------------------------------------\n";
    PointCloud<float> && fpc3d = loadDataFromFile ?
        SpiralPointCloud<float>(inFilePath, a, b, pointCnt, 3, stddev) :
        SpiralPointCloud<float>(a, b, pointCnt, 3, stddev);
    testSize<float, 2>(fpc3d, 0, loadDataFromFile);
    testSize<float, 3>(fpc3d, 0, loadDataFromFile);
    // --segl=100 --a=0 --b=0 --stddev=2.17 --rGraphs --log --d=0
    std::cout << "\n//------------------------------------------"
        << "\n//\t\t (segment) float: "
        << "\n//------------------------------------------\n";
    PointCloud<float> && fpc2 = loadDataFromFile ?
        SegmentPointCloud<float>(inFilePath, segLength, pointCnt, inFileDataDim, stddev) :
        SegmentPointCloud<float>(segLength, 0, 0, stddev);
    testSize<float>(fpc2, 0, loadDataFromFile);
    #else
    std::cout << "\n//------------------------------------------"
        << "\n//\t\t (spiral) double: "
        << "\n//------------------------------------------\n";
    PointCloud<double> && dpc3d = loadDataFromFile ?
        SpiralPointCloud<double>(inFilePath, a, b, pointCnt, 3, stddev) :
        SpiralPointCloud<double>(a, b, pointCnt, 3, stddev);
    // FIX: the 2D test referenced an undeclared `dpc2d`; mirror the float
    // branch above, which runs both the 2D and 3D tests on the same 3D cloud.
    TestDimensions<2, double>::impl(dpc3d, pointCnt);
    TestDimensions<3, double>::impl(dpc3d, pointCnt);
    std::cout << "\n//------------------------------------------"
        << "\n//\t\t (segment) double: "
        << "\n//------------------------------------------\n";
    PointCloud<double> && dpc2 = loadDataFromFile ?
        SegmentPointCloud<double>(inFilePath, segLength, pointCnt, inFileDataDim, stddev) :
        SegmentPointCloud<double>(segLength, 0, 0, stddev);
    testSize<double>(dpc2, 0, loadDataFromFile);
    #endif
#endif
    checkCudaErrors(hipDeviceReset());
    std::cout << rd::HLINE << std::endl;
    std::cout << "END!" << std::endl;
    return EXIT_SUCCESS;
}
| 302ec428054d69d8b788fc31395f532004e53d82.cu | /**
* @file simulation.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek
* Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#define BLOCK_TILE_LOAD_V4 1
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <limits>
#ifdef RD_USE_OPENMP
#include <omp.h>
#endif
#include <helper_cuda.h>
#include <cuda_runtime.h>
#include "rd/gpu/device/brute_force/simulation.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/gpu/util/dev_static_for.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/memory.h"
#include "rd/utils/name_traits.hpp"
#include "tests/test_util.hpp"
#include "cub/test_util.h"
#include "rd/utils/rd_params.hpp"
//------------------------------------------------------------
// GLOBAL CONSTANTS / VARIABLES
//------------------------------------------------------------
// Suffix appended to the date/device-name prefix of the timings log file.
static const std::string LOG_FILE_NAME_SUFFIX = "gpu_brute_force_timings.txt";
// Performance log stream; allocated in initializeLogFile() only when logging is enabled.
std::ofstream * g_logFile = nullptr;
// --log flag: write performance results to the timings file.
bool g_logPerfResults = false;
// --rGraphs flag: draw result graphs (only for dim <= 3).
bool g_drawResultsGraph = false;
// Name of the selected CUDA device (used in log and graph file names).
std::string g_devName;
// Number of timed repetitions per configuration (1 for profiling/debug/quick builds).
#if defined(RD_PROFILE) || defined(RD_DEBUG) || defined(QUICK_TEST)
static constexpr int g_iterations = 1;
#else
static constexpr int g_iterations = 5;
#endif
// --d flag: ordinal of the GPU to run on.
static int g_devId = 0;
// Range of point dimensionalities exercised by the dimension-sweep tests.
#ifdef QUICK_TEST
static constexpr int MAX_TEST_DIM = 4;
#else
static constexpr int MIN_TEST_DIM = 2;
static constexpr int MAX_TEST_DIM = 12;
#endif
// Upper bound on the generated point-cloud size for the size-sweep tests.
static constexpr int MAX_POINTS_NUM = int(1e7);
//------------------------------------------------------------
// Utils
//------------------------------------------------------------
/**
* @brief Create if necessary and open log file. Allocate log file stream.
*/
template <typename T>
static void initializeLogFile()
{
    // No-op unless performance logging was requested on the command line.
    if (g_logPerfResults)
    {
        // Log file name: <date>_<device-name>_<suffix>, opened for appending.
        std::ostringstream logFileName;
        logFileName << getCurrDate() << "_" <<
            g_devName << "_" << LOG_FILE_NAME_SUFFIX;
        std::string logFilePath = rd::findPath("timings/", logFileName.str());
        g_logFile = new std::ofstream(logFilePath.c_str(), std::ios::out | std::ios::app);
        if (g_logFile->fail())
        {
            // FIX: free the stream and reset the global before throwing; the
            // original leaked the ofstream and left g_logFile dangling non-null,
            // which later code (benchmark checks `g_logFile != nullptr`) would
            // mistake for an open log.
            delete g_logFile;
            g_logFile = nullptr;
            throw std::logic_error("Couldn't open file: " + logFileName.str());
        }
        // Section header identifying the data type being benchmarked.
        *g_logFile << "%" << rd::HLINE << std::endl;
        *g_logFile << "% " << typeid(T).name() << std::endl;
        *g_logFile << "%" << rd::HLINE << std::endl;
        // legend
        *g_logFile << "% ";
        logValue(*g_logFile, "inPointsNum", 11);
        logValue(*g_logFile, "dim", 10);
        logValue(*g_logFile, "r1", 10);
        logValue(*g_logFile, "r2", 10);
        logValue(*g_logFile, "inMemLayout", 11);
        logValue(*g_logFile, "outMemLayout", 12);
        logValue(*g_logFile, "chosenPtsNum", 12);
        logValue(*g_logFile, "avgCpuTime", 10);
        logValue(*g_logFile, "minCpuTime", 10);
        logValue(*g_logFile, "maxCpuTime", 10);
        logValue(*g_logFile, "hausdorffDist", 13);
        logValue(*g_logFile, "medianDist", 10);
        logValue(*g_logFile, "avgDist", 10);
        logValue(*g_logFile, "minDist", 10);
        logValue(*g_logFile, "maxDist", 10);
        *g_logFile << "\n";
        g_logFile->flush();
    }
}
//------------------------------------------------------------
// INVOKE AND MEASURE
//------------------------------------------------------------
/**
 * @brief Runs GPU ridge detection g_iterations times for one memory-layout
 *        combination and reports timing plus quality statistics.
 *
 * Uploads (and, for COL_MAJOR input, transposes) the host point set once,
 * then repeatedly runs detection, copies the chosen samples back, and
 * accumulates Hausdorff / median / avg / min / max distance measures.
 * Averaged results are written to std::cout and (optionally) to g_logFile;
 * for dim <= 3 a result graph can be drawn.
 *
 * @param dataPack       Input points and algorithm parameters; ns is updated
 *                       with the number of chosen samples.
 * @param qualityMeasure Quality assessor (not owned here; deleted by caller).
 * @param verbose        Forwarded to the GPU detector.
 */
template <
int DIM,
rd::DataMemoryLayout IN_MEMORY_LAYOUT,
rd::DataMemoryLayout OUT_MEMORY_LAYOUT,
typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void benchmark(
RdData<T> & dataPack,
rd::RDAssessmentQuality<T> * qualityMeasure,
bool verbose = false)
{
using namespace rd::gpu;
// verbose = true;
bruteForce::RidgeDetection<T, DIM, IN_MEMORY_LAYOUT, OUT_MEMORY_LAYOUT> rdGpu(
dataPack.np,
dataPack.r1,
dataPack.r2,
verbose);
// copy and if necessary transpose input data to gpu device
if (IN_MEMORY_LAYOUT == rd::ROW_MAJOR)
{
// same layout on both sides: plain strided copy
rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
rdGpu.dP_, dataPack.P, DIM, dataPack.np, DIM, DIM);
}
else
{
// host data is ROW_MAJOR, device wants COL_MAJOR -> transposing 2D copy
rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
rdGpu.dP_, dataPack.P, DIM, dataPack.np, rdGpu.pPitch_, DIM * sizeof(T));
}
checkCudaErrors(cudaDeviceSynchronize());
rd::CpuTimer timer;
T medianDist = 0, avgDist = 0, minDist = 0, maxDist = 0, hausdorffDist = 0;
float minCpuTime = std::numeric_limits<float>::max();
float maxCpuTime = std::numeric_limits<float>::lowest();
std::vector<T> chosenPoints;
float testAvgCpuTime = 0.f;
// Timed benchmark loop; quality measurement is done outside the timed span.
for (int k = 0; k < g_iterations; ++k)
{
chosenPoints.clear();
timer.start();
rdGpu.ridgeDetection();
timer.stop();
// elapsedMillis(0): presumably "time since start"; confirm CpuTimer API
float currTime = timer.elapsedMillis(0);
testAvgCpuTime += currTime;
minCpuTime = min(currTime, minCpuTime);
maxCpuTime = max(currTime, maxCpuTime);
rd::CpuTimer qmesTimer, postprcsTimer;
postprcsTimer.start();
rdGpu.getChosenSamplesCount();
dataPack.ns = rdGpu.ns_;
chosenPoints.resize(rdGpu.ns_ * dataPack.dim);
// copy back to host results
if (OUT_MEMORY_LAYOUT == rd::ROW_MAJOR)
{
rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, cudaMemcpyDeviceToHost>(
chosenPoints.data(), rdGpu.dS_, DIM, rdGpu.ns_, DIM, DIM);
}
else
{
// transpose device COL_MAJOR samples back into host ROW_MAJOR buffer
rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, cudaMemcpyDeviceToHost>(
chosenPoints.data(), rdGpu.dS_, rdGpu.ns_, DIM, DIM * sizeof(T),
rdGpu.sPitch_);
}
// chosenPoints.resize(rdGpu.ns_ * dataPack.dim);
// measure assessment quality
qmesTimer.start();
hausdorffDist += qualityMeasure->hausdorffDistance(chosenPoints);
T median, avg, min, max;
qualityMeasure->setDistanceStats(chosenPoints, median, avg, min, max);
avgDist += avg;
medianDist += median;
minDist += min;
maxDist += max;
qmesTimer.stop();
postprcsTimer.stop();
std::cout << "postprocess (quality measure): " << qmesTimer.elapsedMillis(0) << "ms"
<< "\tpostprocess (all): " << postprcsTimer.elapsedMillis(0) << "ms"
<< "\tcomputation cpu time: " << timer.elapsedMillis(0) << "ms" << std::endl;
}
// Optional visualisation of input cloud + chosen points (2D/3D only).
if (dataPack.dim <= 3 && g_drawResultsGraph)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream graphName;
graphName << typeid(T).name() << "_" << getCurrDateAndTime() << "_"
<< g_devName
<< "_" << rd::DataMemoryLayoutNameTraits<IN_MEMORY_LAYOUT>::shortName
<< "_" << rd::DataMemoryLayoutNameTraits<OUT_MEMORY_LAYOUT>::shortName
<< "_np-" << dataPack.np
<< "_r1-" << dataPack.r1
<< "_r2-" << dataPack.r2
<< "_a-" << dataPack.a
<< "_b-" << dataPack.b
<< "_s-" << dataPack.s
<< "_result";
std::string filePath = rd::findPath("img/", graphName.str());
gDrawer.startGraph(filePath, dataPack.dim);
if (dataPack.dim == 3)
{
gDrawer.setGraph3DConf();
}
gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#B8E186' ps 0.5 ",
dataPack.P, rd::GraphDrawer<T>::POINTS, dataPack.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#D73027' ps 1.3 ",
chosenPoints.data(), rd::GraphDrawer<T>::POINTS, dataPack.ns);
gDrawer.endGraph();
}
// Average accumulated measures over the iteration count.
testAvgCpuTime /= g_iterations;
hausdorffDist /= g_iterations;
medianDist /= g_iterations;
avgDist /= g_iterations;
minDist /= g_iterations;
maxDist /= g_iterations;
if (g_logFile != nullptr)
{
logValues(*g_logFile,
dataPack.np,
DIM,
dataPack.r1,
dataPack.r2,
std::string(rd::DataMemoryLayoutNameTraits<IN_MEMORY_LAYOUT>::shortName),
std::string(rd::DataMemoryLayoutNameTraits<OUT_MEMORY_LAYOUT>::shortName),
dataPack.ns,
testAvgCpuTime,
minCpuTime,
maxCpuTime,
hausdorffDist,
medianDist,
avgDist,
minDist,
maxDist);
*g_logFile << "\n";
g_logFile->flush();
}
// Console output uses the long layout names; log file used short names.
logValues(std::cout,
dataPack.np,
DIM,
dataPack.r1,
dataPack.r2,
std::string(rd::DataMemoryLayoutNameTraits<IN_MEMORY_LAYOUT>::name),
std::string(rd::DataMemoryLayoutNameTraits<OUT_MEMORY_LAYOUT>::name),
dataPack.ns,
testAvgCpuTime,
minCpuTime,
maxCpuTime,
hausdorffDist,
medianDist,
avgDist,
minDist,
maxDist);
std::cout << std::endl;
}
//------------------------------------------------------------
// Test generation
//------------------------------------------------------------
/**
 * @brief Benchmarks every tested input/output memory-layout combination.
 *
 * In QUICK_TEST builds only COL_MAJOR/COL_MAJOR is run; otherwise the
 * COL/ROW and ROW/ROW combinations are benchmarked as well, in this fixed
 * order (the order determines the output/log ordering).
 */
template <
int DIM,
typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testMemLayout(
RdData<T> & dataPack,
rd::RDAssessmentQuality<T> * qualityMeasure)
{
benchmark<DIM, rd::COL_MAJOR, rd::COL_MAJOR>(dataPack, qualityMeasure);
#ifndef QUICK_TEST
benchmark<DIM, rd::COL_MAJOR, rd::ROW_MAJOR>(dataPack, qualityMeasure);
benchmark<DIM, rd::ROW_MAJOR, rd::ROW_MAJOR>(dataPack, qualityMeasure);
#endif
}
/**
 * @brief Test detection time & quality relative to algorithm parameter values.
 *
 * Sweeps the ridge-detection radius r1 over multiples of the cloud's standard
 * deviation (r2 is always 2*r1) and benchmarks each setting; QUICK_TEST builds
 * use a single r value instead of the sweep.
 *
 * @param pointCnt       Number of points used.
 * @param dim            Point dimensionality (runtime value matching DIM).
 * @param pc             Source point cloud (provides stddev and a/b params).
 * @param points         Extracted point data; must outlive the benchmarks.
 * @param qualityMeasure Quality assessor; ownership is taken and it is
 *                       deleted before returning.
 */
template <
int DIM,
typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testRDParams(
int pointCnt,
int dim,
PointCloud<T> const & pc,
std::vector<T> && points,
rd::RDAssessmentQuality<T> * qualityMeasure)
{
#ifdef QUICK_TEST
// Single radius. Named 'r' so the shared body below compiles in both
// configurations (it previously declared r1/r2 but used an undeclared 'r',
// which broke QUICK_TEST builds).
T r = 1.7f * pc.stddev_;
#else
std::vector<T> r1Vals{0.1f, 0.2f, 0.5f, 1.0f, 1.2f, 1.5f, 1.8f, 2.0f, 3.f, 4.f, 5.f, 10.f};
// std::vector<T> r1Vals{2.0f};
// Scale the sweep values by the cloud's standard deviation.
for (T& val : r1Vals)
{
val *= pc.stddev_;
}
for (T r : r1Vals)
#endif
{
RdData<T> dataPack;
dataPack.dim = dim;
dataPack.np = pointCnt;
dataPack.r1 = r;
dataPack.r2 = r * 2.f;
dataPack.s = pc.stddev_;
dataPack.P = points.data();
pc.getCloudParameters(dataPack.a, dataPack.b);
testMemLayout<DIM>(dataPack, qualityMeasure);
}
delete qualityMeasure;
}
/**
 * @brief helper structure for static for loop over dimension
 *
 * Invoked once per dimension D by StaticFor; logs a section header and then
 * runs the parameter sweep for that dimension on a freshly extracted slice
 * of the point cloud.
 */
struct IterateDimensions
{
template <typename D, typename T>
static void impl(
D idx,
int pointCnt,
PointCloud<T> const & pc)
{
std::cout << rd::HLINE << std::endl;
std::cout << ">>>> Dimension: " << idx << "D\n";
if (g_logPerfResults)
{
// Log cloud parameters as a '%'-prefixed comment line.
T a, b;
pc.getCloudParameters(a, b);
*g_logFile << "%>>>> Dimension: " << idx << "D\n"
<< "% a: " << a << " b: " << b << " s: " << pc.stddev_
<< " pointsNum: " << pointCnt << "\n";
}
// D::value is the compile-time dimension; idx is its runtime counterpart.
testRDParams<D::value>(pointCnt, idx, pc, pc.extractPart(pointCnt, idx),
pc.getQualityMeasurer(idx));
}
};
/**
 * @brief Test detection time & quality relative to point dimension
 *
 * Runs the parameter sweep for a single, compile-time dimension DIM.
 * Opens the log file on entry and closes/frees it on exit when logging
 * is enabled. Throws if the cloud has fewer dimensions than DIM.
 */
template <
int DIM,
typename T>
struct TestDimensions
{
static void impl(
PointCloud<T> & pc,
int pointCnt)
{
static_assert(DIM != 0, "DIM equal to zero!\n");
initializeLogFile<T>();
if (pc.dim_ < DIM)
{
throw std::runtime_error("Input file data dimensionality"
" is lower than requested!");
}
std::cout << rd::HLINE << std::endl;
std::cout << ">>>> Dimension: " << DIM << "D\n";
if (g_logPerfResults)
{
T a, b;
pc.getCloudParameters(a, b);
*g_logFile << "%>>>> Dimension: " << DIM << "D\n"
<< "% a: " << a << " b: " << b << " s: " << pc.stddev_
<< " pointsNum: " << pointCnt << "\n";
}
testRDParams<DIM>(pointCnt, DIM, pc, pc.extractPart(pointCnt, DIM),
pc.getQualityMeasurer(DIM));
// clean-up
if (g_logPerfResults)
{
g_logFile->close();
delete g_logFile;
}
}
};
/**
 * @brief Specialisation for DIM == 0: iterate over all dimensions in
 *        [MIN_TEST_DIM, MAX_TEST_DIM] via a static for loop instead of
 *        testing one fixed dimension.
 */
template <typename T>
struct TestDimensions<0, T>
{
static void impl(
PointCloud<T> & pc,
int pointCnt)
{
initializeLogFile<T>();
if (pc.dim_ < MAX_TEST_DIM)
{
throw std::runtime_error("Input file data dimensionality"
" is lower than requested!");
}
// IterateDimensions::impl is invoked once per dimension.
StaticFor<MIN_TEST_DIM, MAX_TEST_DIM, IterateDimensions>::impl(pointCnt, pc);
// clean-up
if (g_logPerfResults)
{
g_logFile->close();
delete g_logFile;
}
}
};
/**
 * @brief Test detection time & quality relative to number of points
 *
 * With pointCnt > 0 a single size is tested; otherwise point counts
 * 1e3, 2e3, 5e3, 1e4, ... up to MAX_POINTS_NUM are tested (a 1-2-5 series).
 * When data is not read from a file the cloud is (re)generated with the
 * maximum required size first.
 *
 * @param pc           Point cloud to test (may be re-initialised here).
 * @param pointCnt     Specific size to test, or <= 0 for the size sweep.
 * @param readFromFile When true, pc already holds file data and is not
 *                     regenerated.
 */
template <
typename T,
int DIM = 0,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testSize(
PointCloud<T> & pc,
int pointCnt = -1,
bool readFromFile = false)
{
if (pointCnt > 0)
{
if (!readFromFile)
{
pc.pointCnt_ = pointCnt;
pc.dim_ = (DIM == 0) ? MAX_TEST_DIM : DIM;
pc.initializeData();
}
TestDimensions<DIM, T>::impl(pc, pointCnt);
}
else
{
if (!readFromFile)
{
// Generate once at the largest size; smaller tests reuse a prefix.
pc.pointCnt_ = MAX_POINTS_NUM;
pc.dim_ = (DIM == 0) ? MAX_TEST_DIM : DIM;
pc.initializeData();
}
// 1-2-5 size series: k, 2k, 5k for each decade k.
for (int k = 1e3; k <= MAX_POINTS_NUM; k *= 10)
{
std::cout << "\n//------------------------------------------"
<< "\n//\t\t pointCnt: " << k
<< "\n//------------------------------------------\n";
TestDimensions<DIM, T>::impl(pc, k);
if (k == MAX_POINTS_NUM) break;
std::cout << "\n//------------------------------------------"
<< "\n//\t\t pointCnt: " << k*2
<< "\n//------------------------------------------\n";
TestDimensions<DIM, T>::impl(pc, k*2);
std::cout << "\n//------------------------------------------"
<< "\n//\t\t pointCnt: " << k*5
<< "\n//------------------------------------------\n";
TestDimensions<DIM, T>::impl(pc, k*5);
}
}
}
//------------------------------------------------------------
// MAIN
//------------------------------------------------------------
/**
 * @brief Entry point: parses command-line options, initialises the CUDA
 *        device and runs the ridge-detection benchmark suite.
 *
 * Returns EXIT_SUCCESS on completion; exits early with status 1 when the
 * required cloud parameters (--size, --segl, --a, --b, --stddev) are missing.
 */
int main(int argc, char const **argv)
{
float a = -1.f, b = -1.f, stddev = -1.f, segLength = -1.f;
int pointCnt = -1;
std::string inFilePath = "";
int inFileDataDim = 0;
bool loadDataFromFile = false;
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--log <log performance results>]\n"
"\t\t[--rGraphs <draws resuls graphs (if dim <= 3)>]\n"
"\t\t[--a <a parameter of spiral>]\n"
"\t\t[--b <b parameter of spiral>]\n"
"\t\t[--segl <generated N-dimensional segment length>]\n"
"\t\t[--stddev <standard deviation of generated samples>]\n"
"\t\t[--size <number of points>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--f=<relative to binary, input data file path>]\n"
"\t\t[--fd=<data dimensonality>]\n"
"\n", argv[0]);
exit(0);
}
if (args.CheckCmdLineFlag("log"))
{
g_logPerfResults = true;
}
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", b);
}
if (args.CheckCmdLineFlag("segl"))
{
args.GetCmdLineArgument("segl", segLength);
}
if (args.CheckCmdLineFlag("stddev"))
{
args.GetCmdLineArgument("stddev", stddev);
}
if (args.CheckCmdLineFlag("size"))
{
args.GetCmdLineArgument("size", pointCnt);
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", g_devId);
}
if (args.CheckCmdLineFlag("f"))
{
args.GetCmdLineArgument("f", inFilePath);
loadDataFromFile = true;
}
if (args.CheckCmdLineFlag("fd"))
{
args.GetCmdLineArgument("fd", inFileDataDim);
}
if (args.CheckCmdLineFlag("rGraphs"))
{
g_drawResultsGraph = true;
}
checkCudaErrors(deviceInit(g_devId));
// set device name for logging and drawing purposes
cudaDeviceProp devProp;
checkCudaErrors(cudaGetDeviceProperties(&devProp, g_devId));
g_devName = devProp.name;
// All generation parameters are mandatory, even when loading from file.
if (pointCnt < 0 ||
segLength < 0 ||
a < 0 ||
b < 0 ||
stddev < 0)
{
std::cout << "Have to specify parameters! Rerun with --help for help.\n";
exit(1);
}
#ifdef QUICK_TEST
const int dim = 5;
std::cout << "\n//------------------------------------------"
<< "\n//\t\t float: "
<< "\n//------------------------------------------\n";
// PointCloud<float> && fpc = SpiralPointCloud<float>(a, b, pointCnt, dim, stddev);
// TestDimensions<dim, float>::impl(fpc, pointCnt);
PointCloud<float> && fpc = SegmentPointCloud<float>(segLength, pointCnt, dim, stddev);
TestDimensions<dim, float>::impl(fpc, pointCnt);
std::cout << "\n//------------------------------------------"
<< "\n//\t\t double: "
<< "\n//------------------------------------------\n";
// PointCloud<double> && dpc = SpiralPointCloud<double>(a, b, pointCnt, 2, stddev);
// TestDimensions<2, double>::impl(dpc, pointCnt);
PointCloud<double> && dpc = SegmentPointCloud<double>(segLength, pointCnt, dim, stddev);
TestDimensions<dim, double>::impl(dpc, pointCnt);
#else
#ifndef RD_DOUBLE_PRECISION
// --size=1000000 --segl=1457.75 --a=22.52 --b=11.31 --stddev=4.17 --rGraphs --log --d=0
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) float: "
<< "\n//------------------------------------------\n";
PointCloud<float> && fpc3d = loadDataFromFile ?
SpiralPointCloud<float>(inFilePath, a, b, pointCnt, 3, stddev) :
SpiralPointCloud<float>(a, b, pointCnt, 3, stddev);
testSize<float, 2>(fpc3d, 0, loadDataFromFile);
testSize<float, 3>(fpc3d, 0, loadDataFromFile);
// --segl=100 --a=0 --b=0 --stddev=2.17 --rGraphs --log --d=0
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (segment) float: "
<< "\n//------------------------------------------\n";
PointCloud<float> && fpc2 = loadDataFromFile ?
SegmentPointCloud<float>(inFilePath, segLength, pointCnt, inFileDataDim, stddev) :
SegmentPointCloud<float>(segLength, 0, 0, stddev);
testSize<float>(fpc2, 0, loadDataFromFile);
#else
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) double: "
<< "\n//------------------------------------------\n";
PointCloud<double> && dpc3d = loadDataFromFile ?
SpiralPointCloud<double>(inFilePath, a, b, pointCnt, 3, stddev) :
SpiralPointCloud<double>(a, b, pointCnt, 3, stddev);
// The 3D cloud serves both the 2D and 3D tests (mirrors the float path,
// which runs testSize<float, 2> and <float, 3> on the same fpc3d cloud).
// Previously this line referenced an undeclared 'dpc2d' and did not compile.
TestDimensions<2, double>::impl(dpc3d, pointCnt);
TestDimensions<3, double>::impl(dpc3d, pointCnt);
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (segment) double: "
<< "\n//------------------------------------------\n";
PointCloud<double> && dpc2 = loadDataFromFile ?
SegmentPointCloud<double>(inFilePath, segLength, pointCnt, inFileDataDim, stddev) :
SegmentPointCloud<double>(segLength, 0, 0, stddev);
testSize<double>(dpc2, 0, loadDataFromFile);
#endif
#endif
checkCudaErrors(cudaDeviceReset());
std::cout << rd::HLINE << std::endl;
std::cout << "END!" << std::endl;
return EXIT_SUCCESS;
}
|
975dc77d433b74fda665e8a0b4e854c25be8e168.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdio.h>
#include <stdlib.h>
#include "lodepng.h"
/********
Compile with nvcc 2040364_Task3_B.cu lodepng.cpp -o task3b
./task3b
*********/
// Blurs an RGBA8 image by averaging each pixel's available neighbours.
// Launch layout: one block per row (gridDim.x == height), one thread per
// column (blockDim.x == width), so every thread owns exactly one pixel.
// The filter support is the six neighbours the original code sampled:
// (i,j±1), (i+1,j-1..j+1) and (i-1,j). Alpha is passed through unchanged.
__global__ void blur_image(unsigned char * gpu_imageOuput, unsigned char * gpu_imageInput,int width,int height){
    int idx = blockDim.x * blockIdx.x + threadIdx.x; // flat pixel index
    int i = blockIdx.x;   // row
    int j = threadIdx.x;  // column
    float t_r = 0.f;
    float t_g = 0.f;
    float t_b = 0.f;
    int counter = 0;
    // (row, col) offsets of the six sampled neighbours.
    const int di[6] = { 1, 0, 1, 1, 0, -1 };
    const int dj[6] = { -1, 1, 1, 0, -1, 0 };
    for (int k = 0; k < 6; ++k) {
        int ni = i + di[k];
        int nj = j + dj[k];
        // Proper bounds check. The previous code tested expressions such as
        // `if (i+1 && j-1)` for non-zero-ness, which admitted out-of-bounds
        // neighbour reads at the image edges.
        if (ni < 0 || ni >= height || nj < 0 || nj >= width) continue;
        int pixel = (ni * width + nj) * 4; // RGBA: 4 bytes per pixel
        t_r += gpu_imageInput[pixel];
        t_g += gpu_imageInput[pixel + 1];
        t_b += gpu_imageInput[pixel + 2];
        ++counter;
    }
    int current_pixel = idx * 4;
    if (counter > 0) {
        gpu_imageOuput[current_pixel]     = (unsigned char)(t_r / counter);
        gpu_imageOuput[current_pixel + 1] = (unsigned char)(t_g / counter);
        gpu_imageOuput[current_pixel + 2] = (unsigned char)(t_b / counter);
    } else {
        // Degenerate 1x1 image: no neighbours exist, so pass the pixel
        // through (also avoids the divide-by-zero of the original code).
        gpu_imageOuput[current_pixel]     = gpu_imageInput[current_pixel];
        gpu_imageOuput[current_pixel + 1] = gpu_imageInput[current_pixel + 1];
        gpu_imageOuput[current_pixel + 2] = gpu_imageInput[current_pixel + 2];
    }
    // Alpha channel is copied from the input, as before.
    gpu_imageOuput[current_pixel + 3] = gpu_imageInput[current_pixel + 3];
}
// Computes (finish - start) in nanoseconds, normalising a negative
// nanosecond component by borrowing one second.
// Returns 0 on success, 1 when the computed difference is not positive.
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
    long long int sec  = finish->tv_sec  - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    if (nsec < 0) {
        sec  -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return (*difference > 0) ? 0 : 1;
}
/**
 * Loads Normal_image.png, blurs it on the GPU and writes Blurred_image.png,
 * printing the total wall-clock time taken.
 */
int main(int argc, char **argv){
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    unsigned int error;
    unsigned int encError;
    unsigned char* image;
    unsigned int width;
    unsigned int height;
    const char* filename = "Normal_image.png";
    const char* newFileName = "Blurred_image.png";
    // lodepng allocates the RGBA8 pixel buffer (4 bytes/pixel) with malloc.
    error = lodepng_decode32_file(&image, &width, &height, filename);
    if(error){
        printf("error %u: %s\n", error, lodepng_error_text(error));
        return 1;  // image pointer is unusable; continuing would be UB
    }
    const int ARRAY_SIZE = width*height*4;  // total RGBA bytes
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned char);
    // Heap buffer for the result. The previous version used stack VLAs of
    // ARRAY_SIZE * 4 bytes (4x too large, since ARRAY_SIZE already counts
    // the four channels), which risked a stack overflow for real images,
    // and copied the input into a second host buffer unnecessarily.
    unsigned char *host_imageOutput = (unsigned char*)malloc(ARRAY_BYTES);
    if(host_imageOutput == NULL){
        printf("host allocation failed\n");
        free(image);
        return 1;
    }
    // declare GPU memory pointers
    unsigned char * d_in;
    unsigned char * d_out;
    // allocate GPU memory and upload the decoded pixels directly
    hipMalloc((void**) &d_in, ARRAY_BYTES);
    hipMalloc((void**) &d_out, ARRAY_BYTES);
    hipMemcpy(d_in, image, ARRAY_BYTES, hipMemcpyHostToDevice);
    // launch the kernel: one block per row, one thread per column
    // NOTE(review): assumes width <= the device's max threads per block;
    // wider images would need a tiled launch.
    hipLaunchKernelGGL(( blur_image), dim3(height), dim3(width), 0, 0, d_out, d_in,width,height);
    hipError_t launchErr = hipGetLastError();
    if(launchErr != hipSuccess){
        printf("kernel launch error: %s\n", hipGetErrorString(launchErr));
    }
    // copy back the result array to the CPU (blocking copy synchronizes)
    hipMemcpy(host_imageOutput, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
    encError = lodepng_encode32_file(newFileName, host_imageOutput, width, height);
    if(encError){
        // report the encode error code (previously printed the decode code)
        printf("error %u: %s\n", encError, lodepng_error_text(encError));
    }
    free(image);
    free(host_imageOutput);
    hipFree(d_in);
    hipFree(d_out);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
| 975dc77d433b74fda665e8a0b4e854c25be8e168.cu | #include <stdio.h>
#include <stdio.h>
#include <stdlib.h>
#include "lodepng.h"
/********
Compile with nvcc 2040364_Task3_B.cu lodepng.cpp -o task3b
./task3b
*********/
// Blurs an RGBA8 image by averaging each pixel's available neighbours.
// Launch layout: one block per row (gridDim.x == height), one thread per
// column (blockDim.x == width), so every thread owns exactly one pixel.
// The filter support is the six neighbours the original code sampled:
// (i,j±1), (i+1,j-1..j+1) and (i-1,j). Alpha is passed through unchanged.
__global__ void blur_image(unsigned char * gpu_imageOuput, unsigned char * gpu_imageInput,int width,int height){
    int idx = blockDim.x * blockIdx.x + threadIdx.x; // flat pixel index
    int i = blockIdx.x;   // row
    int j = threadIdx.x;  // column
    float t_r = 0.f;
    float t_g = 0.f;
    float t_b = 0.f;
    int counter = 0;
    // (row, col) offsets of the six sampled neighbours.
    const int di[6] = { 1, 0, 1, 1, 0, -1 };
    const int dj[6] = { -1, 1, 1, 0, -1, 0 };
    for (int k = 0; k < 6; ++k) {
        int ni = i + di[k];
        int nj = j + dj[k];
        // Proper bounds check. The previous code tested expressions such as
        // `if (i+1 && j-1)` for non-zero-ness, which admitted out-of-bounds
        // neighbour reads at the image edges.
        if (ni < 0 || ni >= height || nj < 0 || nj >= width) continue;
        int pixel = (ni * width + nj) * 4; // RGBA: 4 bytes per pixel
        t_r += gpu_imageInput[pixel];
        t_g += gpu_imageInput[pixel + 1];
        t_b += gpu_imageInput[pixel + 2];
        ++counter;
    }
    int current_pixel = idx * 4;
    if (counter > 0) {
        gpu_imageOuput[current_pixel]     = (unsigned char)(t_r / counter);
        gpu_imageOuput[current_pixel + 1] = (unsigned char)(t_g / counter);
        gpu_imageOuput[current_pixel + 2] = (unsigned char)(t_b / counter);
    } else {
        // Degenerate 1x1 image: no neighbours exist, so pass the pixel
        // through (also avoids the divide-by-zero of the original code).
        gpu_imageOuput[current_pixel]     = gpu_imageInput[current_pixel];
        gpu_imageOuput[current_pixel + 1] = gpu_imageInput[current_pixel + 1];
        gpu_imageOuput[current_pixel + 2] = gpu_imageInput[current_pixel + 2];
    }
    // Alpha channel is copied from the input, as before.
    gpu_imageOuput[current_pixel + 3] = gpu_imageInput[current_pixel + 3];
}
// Computes (finish - start) in nanoseconds, normalising a negative
// nanosecond component by borrowing one second.
// Returns 0 on success, 1 when the computed difference is not positive.
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
    long long int sec  = finish->tv_sec  - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    if (nsec < 0) {
        sec  -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return (*difference > 0) ? 0 : 1;
}
/**
 * Loads Normal_image.png, blurs it on the GPU and writes Blurred_image.png,
 * printing the total wall-clock time taken.
 */
int main(int argc, char **argv){
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    unsigned int error;
    unsigned int encError;
    unsigned char* image;
    unsigned int width;
    unsigned int height;
    const char* filename = "Normal_image.png";
    const char* newFileName = "Blurred_image.png";
    // lodepng allocates the RGBA8 pixel buffer (4 bytes/pixel) with malloc.
    error = lodepng_decode32_file(&image, &width, &height, filename);
    if(error){
        printf("error %u: %s\n", error, lodepng_error_text(error));
        return 1;  // image pointer is unusable; continuing would be UB
    }
    const int ARRAY_SIZE = width*height*4;  // total RGBA bytes
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned char);
    // Heap buffer for the result. The previous version used stack VLAs of
    // ARRAY_SIZE * 4 bytes (4x too large, since ARRAY_SIZE already counts
    // the four channels), which risked a stack overflow for real images,
    // and copied the input into a second host buffer unnecessarily.
    unsigned char *host_imageOutput = (unsigned char*)malloc(ARRAY_BYTES);
    if(host_imageOutput == NULL){
        printf("host allocation failed\n");
        free(image);
        return 1;
    }
    // declare GPU memory pointers
    unsigned char * d_in;
    unsigned char * d_out;
    // allocate GPU memory and upload the decoded pixels directly
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);
    cudaMemcpy(d_in, image, ARRAY_BYTES, cudaMemcpyHostToDevice);
    // launch the kernel: one block per row, one thread per column
    // NOTE(review): assumes width <= the device's max threads per block;
    // wider images would need a tiled launch.
    blur_image<<<height, width>>>(d_out, d_in,width,height);
    cudaError_t launchErr = cudaGetLastError();
    if(launchErr != cudaSuccess){
        printf("kernel launch error: %s\n", cudaGetErrorString(launchErr));
    }
    // copy back the result array to the CPU (blocking copy synchronizes)
    cudaMemcpy(host_imageOutput, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    encError = lodepng_encode32_file(newFileName, host_imageOutput, width, height);
    if(encError){
        // report the encode error code (previously printed the decode code)
        printf("error %u: %s\n", encError, lodepng_error_text(encError));
    }
    free(image);
    free(host_imageOutput);
    cudaFree(d_in);
    cudaFree(d_out);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
|
b9f4d67cc8f5765b81fa8f86133aafc0ca2929a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DLLExport
#define TestExport
#include "../../lib_gpu/include/cuda_kernel_helpers.hpp"
#include "gpurf.h"
namespace lib_ensembles {
// Trampoline kernel: the host cannot call __device__ member functions
// directly, so this kernel dispatches to the requested GpuRf device routine.
// `params` is passed by value; GetConstPointers rebinds its info pointers to
// device-resident copies before dispatch.
template <typename T>
__global__ void host_kernel(GpuRf<T>* gpu_algo,
GpuDteAlgorithmShared::GpuParams<T> params,
GpuDteAlgorithmShared::GpuDteKernelId type) {
gpu_algo->GetConstPointers(&params.iteration_info, &params.dataset_info,
&params.static_info);
// Select the device routine identified by the kernel id.
switch (type) {
case GpuDteAlgorithmShared::kSetupKernel:
gpu_algo->gpurf_setup_kernel(&params);
break;
case GpuDteAlgorithmShared::kInitTreeBatch:
gpu_algo->gpurf_initialize_tree_batch(&params);
break;
case GpuDteAlgorithmShared::kFindSplit:
gpu_algo->gpurf_find_split(&params);
break;
case GpuDteAlgorithmShared::kPerformSplit:
gpu_algo->gpurf_perform_split(&params);
break;
case GpuDteAlgorithmShared::kPredict:
gpu_algo->gpurf_predict(&params);
break;
case GpuDteAlgorithmShared::kOobEstimate:
gpu_algo->gpurf_oob_estimate(&params);
break;
case GpuDteAlgorithmShared::kFeatureImp:
gpu_algo->gpurf_feature_importance(&params);
break;
default:
// Unknown kernel id: silently do nothing.
break;
}
}
// Host-side launcher: runs the dispatching host_kernel with the given launch
// configuration. Note: no error check after the launch; failures surface at
// the next synchronizing runtime call.
template <typename T>
void GpuRf<T>::CallCudaKernel(int blocks, int block_size,
GpuDteAlgorithmShared::GpuParams<T>& params,
GpuDteAlgorithmShared::GpuDteKernelId id) {
hipLaunchKernelGGL(( host_kernel<T>), dim3(blocks), dim3(block_size), 0, 0, this, params, id);
}
// Initializes one RNG state per launched thread with a fixed seed (324123)
// and a per-thread sequence number, giving deterministic but decorrelated
// random streams across threads.
template <typename T>
__device__ void GpuRf<T>::gpurf_setup_kernel(
GpuDteAlgorithmShared::GpuParams<T>* params) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread gets same seed, a different sequence number, no offset
if (id < params->iteration_info->threads_launched)
hiprand_init(324123, id, 0, &params->random_states[id]);
}
// Builds the bootstrap sample for one tree per block (blockIdx.x == tree).
// Two modes:
//  - balanced classification sampling: draw equally many instances from each
//    target class, using a shared cursor to assign slots;
//  - plain bootstrap: draw nr_instances uniform random instances.
// Finally thread 0 writes the tree's root node header.
template <typename T>
__device__ void GpuRf<T>::gpurf_initialize_tree_batch(
GpuDteAlgorithmShared::GpuParams<T>* params) {
// Shared write cursor into this tree's index slice (balanced mode only);
// in plain-bootstrap mode it just records the final sample count.
__shared__ int s_indexCursor;
if (threadIdx.x == 0) {
if (params->static_info->balanced_sampling &&
params->dataset_info->data_type == type_classification_)
s_indexCursor = 0;
else
s_indexCursor = params->dataset_info->nr_instances;
}
__syncthreads();
// Start of this tree's slice in the per-tree index/inbag buffers.
int treeOffset = params->dataset_info->nr_instances * blockIdx.x;
if (params->dataset_info->data_type == type_classification_ &&
params->static_info->balanced_sampling) {
// Initialize probability main buffer
int localCursor;
int randVal;
int stateId = (blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size;
curandStateMRG32k3a localState = params->random_states[stateId];
// Draw nr_instances / nr_target_values samples from each class range.
for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
int targetStart = params->target_starts[i];
int targetEnd = (i == params->dataset_info->nr_target_values - 1)
? params->dataset_info->nr_instances - 1
: params->target_starts[i + 1] - 1;
for (int ii = threadIdx.x;
ii < params->dataset_info->nr_instances /
params->dataset_info->nr_target_values;
ii += blockDim.x) {
// Atomically claim the next output slot.
localCursor = GpuDte<T>::AtomicAdd(&s_indexCursor, 1);
if (targetEnd - targetStart > 0)
randVal =
targetStart + hiprand(&localState) % (targetEnd - targetStart);
else
randVal = targetStart;
params->indices_buffer[0][treeOffset + localCursor] = randVal;
params->indices_inbag[treeOffset + randVal] = true;
}
}
} else {
// Initialize indices main buffer
int randVal;
int stateId = (blockIdx.x * blockDim.x + threadIdx.x) %
params->static_info->node_buffer_size;
curandStateMRG32k3a localState = params->random_states[stateId];
// Uniform bootstrap: one random instance per output slot.
for (int i = threadIdx.x; i < params->dataset_info->nr_instances;
i += blockDim.x) {
randVal = hiprand(&localState) % params->dataset_info->nr_instances;
params->indices_buffer[0][treeOffset + i] = randVal;
params->indices_inbag[treeOffset + randVal] = true;
}
}
__syncthreads();
// Thread 0 publishes the root node covering this tree's full sample.
if (threadIdx.x == 0) {
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> root;
root.parent_id = -2;
root.attribute = -2;
root.split_point = -2;
// Reserve a unique tracking id via a global cursor.
root.tracking_id = GpuDte<T>::AtomicAdd(&params->node_cursors[node_id_], 1);
root.node_index_start = treeOffset;
root.node_index_count = s_indexCursor;
params->node_buffers[params->iteration_info->read_buffer_id][blockIdx.x] =
root;
}
}
// Finds the best split for one tree node per block. Thread 0 loads the node
// into shared memory, the block accumulates the node's target distribution,
// then the block repeatedly picks a random attribute, sorts the node's
// instances on it, scores the candidate split (gain for classification,
// variance for regression) and keeps the best. The loop retries until the
// feature budget is spent and at least one "sensible" split was seen (with a
// bounded number of extra retries). Results are written back by thread 0.
template <typename T>
__device__ void GpuRf<T>::gpurf_find_split(
GpuDteAlgorithmShared::GpuParams<T>* params) {
// Per-block scratch: class/target distribution, radix-sort histograms and
// per-digit offsets, plus the node being processed.
__shared__ T s_dynamic_shared[40];
__shared__ unsigned int s_histograms[1024];
__shared__ unsigned int s_offsets[256];
__shared__ GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> s_tree_node;
__shared__ GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> s_tmp_node;
__shared__ int s_attribute_type;
__shared__ bool s_sensible_split;
// RNG state lives only in thread 0's registers (only thread 0 draws).
curandStateMRG32k3a localState;
if (threadIdx.x == 0) {
localState = params->random_states[blockIdx.x];
s_tree_node =
params->node_buffers[params->iteration_info->read_buffer_id]
[blockIdx.x + params->iteration_info->node_offset];
s_tmp_node =
params
->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset];
s_tmp_node.tmp_score = 0;
s_sensible_split = false;
for (int i = 0; i < params->dataset_info->nr_target_values * max_nominal_;
++i)
s_dynamic_shared[i] = 0;
}
__syncthreads();
// Find prior distribution
for (int i = threadIdx.x; i < s_tree_node.node_index_count; i += blockDim.x) {
// tick_tock selects which of the two ping-pong index buffers is current.
int inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[s_tree_node.node_index_start + i];
switch (params->dataset_info->data_type) {
case type_classification_:
// Histogram of class labels.
GpuDte<T>::AtomicAdd(&s_dynamic_shared[int(params->target_data[inst])],
T(1));
break;
case type_regression_:
// Sum of targets (mean derived later).
GpuDte<T>::AtomicAdd(&s_dynamic_shared[0], params->target_data[inst]);
break;
}
}
__syncthreads();
bool firstFeature = true;
int k = params->static_info->nr_features;
// Allow up to 10 extra retries past the feature budget while no sensible
// split has been found.
int max_retries = k - params->dataset_info->nr_attributes < -10
? -10
: k - params->dataset_info->nr_attributes;
while ((k > max_retries) && (k-- > 0 || !s_sensible_split)) {
if (threadIdx.x == 0) {
// Pick a random candidate attribute; clamp its type code.
s_tmp_node.tmp_attribute =
hiprand(&localState) % params->dataset_info->nr_attributes;
s_attribute_type = params->attribute_type[s_tmp_node.tmp_attribute];
s_attribute_type =
s_attribute_type >= max_nominal_ ? 2 : s_attribute_type;
}
__syncthreads();
// Sort indices on attribute
radix_sort_on_attribute(params, s_tree_node, s_tmp_node, s_histograms,
s_offsets);
__syncthreads();
// Score the candidate split; higher response is better.
T response;
switch (params->dataset_info->data_type) {
case type_classification_:
response = eval_numeric_attribute(params, s_tree_node, s_tmp_node,
s_dynamic_shared, s_attribute_type,
s_histograms, s_offsets);
break;
case type_regression_:
response =
GpuRf::variance_calculation(params, s_tree_node, s_tmp_node,
s_dynamic_shared, (T*)s_histograms);
break;
}
if (threadIdx.x == 0) {
// Keep the best-scoring candidate (the first one unconditionally).
if (s_tmp_node.tmp_score < response || firstFeature) {
// Save splitpoint, attribute and distribution
s_tmp_node.tmp_score = response;
s_tree_node.split_point = s_tmp_node.tmp_split;
s_tree_node.attribute = s_tmp_node.tmp_attribute;
switch (params->dataset_info->data_type) {
case type_classification_:
for (int i = 0;
i < params->dataset_info->nr_target_values * max_nominal_; ++i)
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_ +
i] = s_dynamic_shared[i];
break;
case type_regression_:
// Slots 2 and 3 hold the child aggregates produced by
// variance_calculation (presumably left/right sums — confirm).
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_] = s_dynamic_shared[2];
params->probability_tmp_buffer
[blockIdx.x * params->dataset_info->nr_target_values *
max_nominal_ +
1] = s_dynamic_shared[3];
break;
}
}
// A split counts as sensible once its score clears a small threshold.
if (s_tmp_node.tmp_score > 1e-3) s_sensible_split = true;
firstFeature = false;
}
__syncthreads();
}
// Copy back result
if (threadIdx.x == 0) {
params->node_buffers[params->iteration_info->read_buffer_id]
[blockIdx.x + params->iteration_info->node_offset] =
s_tree_node;
params->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset] =
s_tmp_node;
params->random_states[blockIdx.x] = localState;
}
}
// Applies the chosen splits: delegates to the shared gpudte_perform_split
// routine, providing per-block shared scratch for child node counts.
template <typename T>
__device__ void GpuRf<T>::gpurf_perform_split(
GpuDteAlgorithmShared::GpuParams<T>* params) {
// Shared scratch for per-child instance counts used during the split.
__shared__ int s_node_counts[40];
gpudte_perform_split(*(params->static_info), *(params->dataset_info),
*(params->iteration_info), params->probability_buffers,
params->probability_tmp_buffer, params->dataset,
params->attribute_type, s_node_counts,
params->indices_buffer, params->node_cursors,
params->node_buffers);
}
// Prediction entry point: one thread per (tree, instance) pair, offset by
// tree_offset for multi-launch batching. Delegates tree traversal and vote
// accumulation to the shared gpudte_predict helper.
template <typename T>
__device__ void GpuRf<T>::gpurf_predict(
GpuDteAlgorithmShared::GpuParams<T>* params) {
// Guard against the ragged tail of the launch.
if (threadIdx.x + blockIdx.x * blockDim.x >=
params->iteration_info->threads_launched)
return;
int tid = params->iteration_info->tree_offset + threadIdx.x +
blockIdx.x * blockDim.x;
gpudte_predict(
tid, params->dataset_info->nr_instances, params->dataset_info->data_type,
params->dataset_info->nr_target_values, params->node_buffers_classify,
params->dataset, params->probability_tmp_buffer, params->predictions,
params->attribute_type);
}
// Out-of-bag error estimation: one thread per (tree, instance) pair. Threads
// whose instance was in the tree's bootstrap sample return immediately; the
// rest walk the classification tree for their instance and accumulate either
// correct/incorrect counts (classification) or predicted values into mse
// (regression) via global atomics.
template <typename T>
__device__ void GpuRf<T>::gpurf_oob_estimate(
GpuDteAlgorithmShared::GpuParams<T>* params) {
// Guard against the ragged tail of the launch.
if (threadIdx.x + blockIdx.x * blockDim.x >=
params->iteration_info->threads_launched)
return;
int tid = params->iteration_info->tree_offset + threadIdx.x +
blockIdx.x * blockDim.x;
int instanceId = tid % params->dataset_info->nr_instances;
// Tree index derived by (float) division; truncation on assignment to int.
int treeId = T(tid) / T(params->dataset_info->nr_instances);
bool inBag =
params->indices_inbag[params->dataset_info->nr_instances * treeId +
instanceId];
if (inBag) return;
T dataPoint;
// Walk from the tree's root to a leaf (child_count == 0).
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Classify<T> tree_node =
params->node_buffers_classify[treeId];
while (tree_node.child_count != 0) {
int attribute_type = params->attribute_type[tree_node.attribute];
dataPoint =
get_data_point(tree_node.attribute, instanceId,
params->dataset_info->nr_instances, params->dataset);
if (attribute_type > max_nominal_ ||
params->dataset_info->data_type == type_regression_) {
// Numeric attribute: binary split on split_point;
// -flt_max marks a missing value and defaults to the left child.
if (dataPoint != -flt_max)
tree_node =
(dataPoint < tree_node.split_point)
? params->node_buffers_classify[tree_node.child_start]
: params->node_buffers_classify[tree_node.child_start + 1];
else
tree_node = params->node_buffers_classify[tree_node.child_start];
} else {
// Nominal attribute: the value indexes the child directly.
if (dataPoint != -flt_max)
tree_node =
params
->node_buffers_classify[tree_node.child_start + int(dataPoint)];
else
tree_node = params->node_buffers_classify[tree_node.child_start];
}
}
switch (params->dataset_info->data_type) {
case type_classification_: {
// Argmax over the leaf's class probabilities.
int classSelector = 0;
T max_prob = 0;
for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
if (params->probability_tmp_buffer[tree_node.probability_start + i] >
max_prob) {
max_prob =
params->probability_tmp_buffer[tree_node.probability_start + i];
classSelector = i;
}
}
// oobCounts[0] = correct, oobCounts[1] = incorrect.
if (params->target_data[instanceId] == classSelector)
GpuDte<T>::AtomicAdd(&params->oobCounts[0], 1);
else
GpuDte<T>::AtomicAdd(&params->oobCounts[1], 1);
break;
}
case type_regression_:
// Accumulate the leaf prediction; caller presumably derives the error
// from this sum — TODO confirm against host-side aggregation.
GpuDte<T>::AtomicAdd(
&params->mse[0],
params->probability_tmp_buffer[tree_node.probability_start]);
break;
}
}
// Permutation-based feature importance. Identical traversal to
// gpurf_oob_estimate, except that whenever a node tests the attribute under
// evaluation (carried in iteration_info->depth), the instance's value is
// replaced by that of a randomly drawn instance. The resulting per-attribute
// hit/miss counters measure how much the attribute matters.
template <typename T>
__device__ void GpuRf<T>::gpurf_feature_importance(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  if (threadIdx.x + blockIdx.x * blockDim.x >=
      params->iteration_info->threads_launched)
    return;
  int randAttribute = params->iteration_info->depth;
  int tid = params->iteration_info->tree_offset + threadIdx.x +
            blockIdx.x * blockDim.x;
  int instanceId = tid % params->dataset_info->nr_instances;
  // Fix: exact integer division instead of the original float-mediated
  // T(tid) / T(nr_instances), which can misround for large tid when T is
  // float (24-bit mantissa).
  int treeId = tid / params->dataset_info->nr_instances;
  bool inBag =
      params->indices_inbag[params->dataset_info->nr_instances * treeId +
                            instanceId];
  if (inBag) return;
  int instance;
  curandStateMRG32k3a localState = params->random_states[blockIdx.x];
  T dataPoint;
  GpuDteAlgorithmShared::gpuDTE_NodeHeader_Classify<T> tree_node =
      params->node_buffers_classify[treeId];
  while (tree_node.child_count != 0) {
    int attribute_type = params->attribute_type[tree_node.attribute];
    // Permute only the attribute under evaluation.
    if (randAttribute == tree_node.attribute)
      instance = hiprand(&localState) % params->dataset_info->nr_instances;
    else
      instance = instanceId;
    dataPoint =
        get_data_point(tree_node.attribute, instance,
                       params->dataset_info->nr_instances, params->dataset);
    if (attribute_type > max_nominal_ ||
        params->dataset_info->data_type == type_regression_) {
      // Numeric split; missing values (-flt_max) fall to the left child.
      if (dataPoint != -flt_max)
        tree_node =
            (dataPoint < tree_node.split_point)
                ? params->node_buffers_classify[tree_node.child_start]
                : params->node_buffers_classify[tree_node.child_start + 1];
      else
        tree_node = params->node_buffers_classify[tree_node.child_start];
    } else {
      // Nominal split: value indexes the child.
      if (dataPoint != -flt_max)
        tree_node =
            params
                ->node_buffers_classify[tree_node.child_start + int(dataPoint)];
      else
        tree_node = params->node_buffers_classify[tree_node.child_start];
    }
  }
  switch (params->dataset_info->data_type) {
    case type_classification_: {
      int classSelector = 0;
      T max_prob = 0;
      for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
        if (params->probability_tmp_buffer[tree_node.probability_start + i] >
            max_prob) {
          max_prob =
              params->probability_tmp_buffer[tree_node.probability_start + i];
          classSelector = i;
        }
      }
      // Per-attribute [correct, incorrect] counter pair.
      if (params->target_data[instanceId] == classSelector)
        GpuDte<T>::AtomicAdd(&params->oobCounts[randAttribute * 2], 1);
      else
        GpuDte<T>::AtomicAdd(&params->oobCounts[randAttribute * 2 + 1], 1);
      break;
    }
    case type_regression_:
      GpuDte<T>::AtomicAdd(
          &params->mse[randAttribute],
          params->probability_tmp_buffer[tree_node.probability_start]);
      break;
  }
}
// Block-cooperative LSB radix sort of one node's instance indices, keyed on
// the raw 32-bit pattern of the chosen attribute's values. Four 8-bit passes
// (LSB first); the final pass special-cases IEEE-754 negative values, whose
// set sign bit would otherwise order them after the positives.
// Assumes blockDim.x <= 64 (s_thread_radix capacity) -- TODO confirm.
template <typename T>
__device__ void GpuRf<T>::radix_sort_on_attribute(
    GpuDteAlgorithmShared::GpuParams<T>* params,
    GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T>& node,
    GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T>& tmp_node,
    unsigned int s_histograms[1024], unsigned int s_offsets[256]) {
  __shared__ unsigned int s_nrNegativeValues;
  __shared__ unsigned char s_thread_radix[64];
  if (threadIdx.x == 0) s_nrNegativeValues = 0;
  // Reinterpret the attribute column as raw 32-bit patterns.
  unsigned int* input =
      (unsigned int*)&params->dataset[tmp_node.tmp_attribute *
                                      params->dataset_info->nr_instances];
  // Ping-pong index buffers selected by the iteration's tick_tock flag.
  unsigned int* indices =
      (unsigned int*)&params
          ->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                          [node.node_index_start];
  unsigned int* indices2 =
      (unsigned int*)&params
          ->indices_buffer[params->iteration_info->tick_tock ? 1 : 0]
                          [node.node_index_start];
  for (int i = threadIdx.x; i < 1024; i += blockDim.x) s_histograms[i] = 0;
  __syncthreads();
  // Build all four 256-bin byte histograms in a single sweep.
  unsigned char* dataVal;
  for (int i = threadIdx.x; i < node.node_index_count; i += blockDim.x) {
    dataVal = (unsigned char*)&input[indices[i]];
    GpuDte<T>::AtomicAdd(&s_histograms[*dataVal], 1);
    GpuDte<T>::AtomicAdd(&s_histograms[256 + (*(dataVal + 1))], 1);
    GpuDte<T>::AtomicAdd(&s_histograms[512 + (*(dataVal + 2))], 1);
    GpuDte<T>::AtomicAdd(&s_histograms[768 + (*(dataVal + 3))], 1);
  }
  __syncthreads();
  // MSB-histogram bins >= 128 correspond to values with the sign bit set.
  for (int i = threadIdx.x + 128; i < 256; i += blockDim.x)
    GpuDte<T>::AtomicAdd(&s_nrNegativeValues, s_histograms[768 + i]);
  // Radix sort, j is the pass number (0=LSB, 3=MSB)
  bool performPass;
  unsigned int* curCount;
  unsigned char uniqueVal;
  for (int j = 0; j < 4; j++) {
    __syncthreads();
    performPass = true;
    curCount = &s_histograms[j << 8];
    // If every key shares the same byte at this position the pass is a no-op.
    uniqueVal = *(((unsigned char*)&input[indices[0]]) + j);
    if (curCount[uniqueVal] == node.node_index_count) performPass = false;
    // Should we care about negative values?
    if (j != 3) {
      // Here we deal with positive values only
      if (performPass) {
        // Create offsets
        lib_gpu::CudaKernelHelpers::inplace_prefixsum(curCount, 256);
        // Perform Radix Sort
        bool skip;
        unsigned int id, spot;
        unsigned char radix;
        for (int i = 0; i < node.node_index_count; i += blockDim.x) {
          skip = threadIdx.x + i >= node.node_index_count;
          if (!skip) {
            id = indices[threadIdx.x + i];
            radix = *(((unsigned char*)&input[id]) + j);
            s_thread_radix[threadIdx.x] = radix;
            spot = curCount[radix];
          }
          __syncthreads();
          if (!skip) {
            GpuDte<T>::AtomicAdd(&curCount[radix], 1);
            // Stability tie-break: count lower-lane threads placing the same
            // radix this round so each thread lands in a distinct slot.
            for (int ii = threadIdx.x; ii > 0; --ii)
              if (s_thread_radix[ii - 1] == radix) ++spot;
            indices2[spot] = id;
          }
          __syncthreads();
        }
        unsigned int* Tmp = indices;
        indices = indices2;
        indices2 = Tmp;
      }
    } else {
      // This is a special case to correctly handle negative values
      if (performPass) {
        // Create biased offsets, in order for negative numbers to be sorted as well
#pragma unroll
        for (int i = threadIdx.x; i < 256; i += blockDim.x)
          s_offsets[i] = curCount[i];
        __syncthreads();
        lib_gpu::CudaKernelHelpers::inplace_prefixsum(s_offsets, 128);
        if (threadIdx.x == 0) s_offsets[0] = s_nrNegativeValues;
        lib_gpu::CudaKernelHelpers::inplace_reverse_prefixsum(&s_offsets[128],
                                                              128);
        // Fixing the wrong place for negative values
#pragma unroll
        for (int i = threadIdx.x + 128; i < 256; i += blockDim.x)
          s_offsets[i] += curCount[i];
        __syncthreads();
        bool skip;
        int spot;
        unsigned int id;
        unsigned char radix;
        for (int i = 0; i < node.node_index_count; i += blockDim.x) {
          skip = threadIdx.x + i >= node.node_index_count;
          if (!skip) {
            id = indices[threadIdx.x + i];
            radix = input[id] >> 24;
            s_thread_radix[threadIdx.x] = radix;
            // Negative keys (radix >= 128) are filled backwards so the most
            // negative value ends up first.
            if (radix < 128)
              spot = s_offsets[radix];
            else
              spot = s_offsets[radix] - 1;
          }
          __syncthreads();
          if (!skip) {
            if (radix < 128)
              GpuDte<T>::AtomicAdd((int*)&s_offsets[radix], 1);
            else
              GpuDte<T>::AtomicAdd((int*)&s_offsets[radix], -1);
            for (int ii = threadIdx.x; ii > 0; --ii)
              if (s_thread_radix[ii - 1] == radix) spot += radix < 128 ? 1 : -1;
            indices2[spot] = id;
          }
          __syncthreads();
        }
        unsigned int* Tmp = indices;
        indices = indices2;
        indices2 = Tmp;
      } else {
        // The pass is useless, yet we still have to reverse the order of
        // current list if all values are negative.
        if (uniqueVal >= 128) {
          for (unsigned int i = threadIdx.x; i < node.node_index_count;
               i += blockDim.x)
            indices2[i] = indices[node.node_index_count - i - 1];
          // Swap pointers for next pass. Valid indices - the most recent ones -
          // are in mIndices after the swap.
          unsigned int* Tmp = indices;
          indices = indices2;
          indices2 = Tmp;
        }
      }
    }
  }
  __syncthreads();
  // Need to copy back to the correct indices buffer
  if (indices !=
      (unsigned int*)&params
          ->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                          [node.node_index_start]) {
    for (int i = threadIdx.x; i < node.node_index_count; i += blockDim.x)
      indices2[i] = indices[i];
  }
}
// Evaluate candidate numeric splits for a classification node. The node's
// indices must already be sorted on the candidate attribute (see
// radix_sort_on_attribute). Each thread scores one cut position per round and
// the block reduces to the best information-gain split; on exit curr_dist
// holds the left/right class distributions for the chosen split.
// NOTE(review): preVal is read before assignment when i + threadIdx.x == 0;
// the "|| threadIdx.x + i == 0" disjunct masks the outcome but the
// uninitialized read itself remains -- worth confirming upstream.
template <typename T>
__device__ T GpuRf<T>::eval_numeric_attribute(
    GpuDteAlgorithmShared::GpuParams<T>* params,
    GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T>& node,
    GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T>& tmp_node, T* curr_dist,
    int att_type, unsigned int* s_histograms, unsigned int* s_offsets) {
  __shared__ T local_dist[40];
  int numInds = node.node_index_count;
  int nodeIndStart = node.node_index_start;
  int inst, bestI = 0;
  T val, preVal;
  T response, bestResponse = 0.0f;
  // Start with the whole class distribution on the left-hand side.
  for (int i = 0; i < params->dataset_info->nr_target_values; ++i)
    local_dist[i + params->dataset_info->nr_target_values] = 0;
  for (int i = 0; i < params->dataset_info->nr_target_values; ++i)
    local_dist[i] =
        curr_dist[i] + curr_dist[i + params->dataset_info->nr_target_values];
  s_offsets[threadIdx.x] = 0;
  T prior = entropy_over_columns((T*)curr_dist, att_type,
                                 params->dataset_info->nr_target_values);
  __syncthreads();
  for (int i = threadIdx.x;
       i < params->dataset_info->nr_target_values * max_nominal_;
       i += blockDim.x)
    curr_dist[i] = local_dist[i];
  __syncthreads();
  // Find best split on attribute
  bool skip;
  for (int i = 0; i < numInds; i += blockDim.x) {
    skip = threadIdx.x + i >= node.node_index_count;
    if (!skip) {
      inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                   [nodeIndStart + i + threadIdx.x];
      val = get_data_point(tmp_node.tmp_attribute, inst,
                           params->dataset_info->nr_instances, params->dataset);
      s_offsets[threadIdx.x] = int(params->target_data[inst]);
      if (i + threadIdx.x != 0) {
        inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                     [nodeIndStart + i + threadIdx.x - 1];
        preVal =
            get_data_point(tmp_node.tmp_attribute, inst,
                           params->dataset_info->nr_instances, params->dataset);
      }
      // Move this thread's instance from the left half of the shared
      // distribution to the right half.
      GpuDte<T>::AtomicAdd(&curr_dist[params->dataset_info->nr_target_values +
                                      s_offsets[threadIdx.x]],
                           T(1));
      GpuDte<T>::AtomicAdd(&curr_dist[s_offsets[threadIdx.x]], T(-1));
    }
    __syncthreads();
    if (!skip) {
      // Replay the moves of all lower-lane threads so local_dist reflects a
      // cut placed right after this thread's instance.
      for (int ii = threadIdx.x; ii >= 0; --ii) {
        ++local_dist[params->dataset_info->nr_target_values + s_offsets[ii]];
        --local_dist[s_offsets[ii]];
      }
      response = prior - entropy_conditioned_on_rows(
                             local_dist, att_type,
                             params->dataset_info->nr_target_values);
      // Only cut between two distinct attribute values (preVal < val).
      if (bestResponse < response && (preVal < val || threadIdx.x + i == 0)) {
        bestResponse = response;
        bestI = i + threadIdx.x;
      }
      for (int ii = 0;
           ii < params->dataset_info->nr_target_values * max_nominal_; ++ii)
        local_dist[ii] = curr_dist[ii];
    }
    __syncthreads();
  }
  // Block-wide argmax reduction over per-thread best responses.
  T* responses = (T*)s_offsets;
  responses[threadIdx.x] = bestResponse;
  s_offsets[threadIdx.x + blockDim.x] = bestI;
  for (int i = threadIdx.x;
       i < params->dataset_info->nr_target_values * max_nominal_;
       i += blockDim.x)
    curr_dist[i] = 0;
  __syncthreads();
  for (int i = blockDim.x >> 1; i > 0; i >>= 1) {
    if (threadIdx.x < i) {
      if (responses[i + threadIdx.x] > responses[threadIdx.x]) {
        responses[threadIdx.x] = responses[i + threadIdx.x];
        s_offsets[blockDim.x + threadIdx.x] =
            s_offsets[blockDim.x + threadIdx.x + i];
      }
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    bestI = s_offsets[blockDim.x];
    bestResponse = responses[threadIdx.x];
    if (bestI > 0) {
      // Split halfway between the last value before and the first value
      // after the chosen cut.
      T pointBeforeSplit = 0.0f, pointAfterSplit = 0.0f;
      int instJustBeforeSplit, instJustAfterSplit;
      instJustBeforeSplit =
          params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                [nodeIndStart + bestI - 1];
      instJustAfterSplit =
          params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                [nodeIndStart + bestI];
      pointBeforeSplit =
          get_data_point(tmp_node.tmp_attribute, instJustBeforeSplit,
                         params->dataset_info->nr_instances, params->dataset);
      pointAfterSplit =
          get_data_point(tmp_node.tmp_attribute, instJustAfterSplit,
                         params->dataset_info->nr_instances, params->dataset);
      tmp_node.tmp_split = (pointAfterSplit + pointBeforeSplit) / 2.0f;
    } else
      tmp_node.tmp_split = 0;
  }
  __syncthreads();
  // Assemble new distribution
  for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
    inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                 [nodeIndStart + i];
    val = get_data_point(tmp_node.tmp_attribute, inst,
                         params->dataset_info->nr_instances, params->dataset);
    if (val != -flt_max)
      GpuDte<T>::AtomicAdd(&curr_dist[params->dataset_info->nr_target_values *
                                          ((val < tmp_node.tmp_split) ? 0 : 1) +
                                      int(params->target_data[inst])],
                           T(1));
    else
      GpuDte<T>::AtomicAdd(&curr_dist[int(params->target_data[inst])], T(1));
  }
  return bestResponse;
}
// Best-split search for regression nodes: for every cut position in the
// attribute-sorted index list, computes the gain
// nL*nR*(meanL-meanR)^2/(nL+nR) and block-reduces to the best cut. On exit
// curr_dist holds [countLeft, countRight, meanLeft, meanRight].
// NOTE(review): preVal is read uninitialized when i + threadIdx.x == 0
// (masked by the "|| threadIdx.x + i == 0" disjunct). Also the initial sum
// into curr_dist[1] uses target_data while the scan below shifts the
// attribute values themselves -- looks inconsistent; confirm upstream.
template <typename T>
__device__ T GpuRf<T>::variance_calculation(
    GpuDteAlgorithmShared::GpuParams<T>* params,
    GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T>& node,
    GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T>& tmp_node, T* curr_dist,
    T* s_histograms) {
  int numInds = node.node_index_count;
  int nodeIndStart = node.node_index_start;
  int attribute = tmp_node.tmp_attribute;
  int inst, bestI = 0;
  T val, means[2], bestGain = 0.0f, preVal;
  means[0] = 0;
  if (threadIdx.x == 0) curr_dist[0] = 0.0f;
  // Calculate mean values from split
  for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
    inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                 [nodeIndStart + i];
    val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
                         params->dataset);
    if (val != -flt_max)
      GpuDte<T>::AtomicAdd(&curr_dist[1], params->target_data[inst]);
  }
  __syncthreads();
  // Everything starts on the right-hand side of the cut.
  means[1] = curr_dist[1];
  T gain = 0;
  bool skip;
  for (int i = 0; i < numInds; i += blockDim.x) {
    skip = threadIdx.x + i >= numInds;
    if (!skip) {
      inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                   [nodeIndStart + i + threadIdx.x];
      val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
                           params->dataset);
      s_histograms[threadIdx.x] = val;
      if (i + threadIdx.x != 0) {
        inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                     [nodeIndStart + i + threadIdx.x - 1];
        preVal =
            get_data_point(tmp_node.tmp_attribute, inst,
                           params->dataset_info->nr_instances, params->dataset);
      }
      // Shift this instance's value from the right sum to the left sum.
      GpuDte<T>::AtomicAdd(&curr_dist[0], val);
      GpuDte<T>::AtomicAdd(&curr_dist[1], -val);
    }
    __syncthreads();
    if (!skip) {
      // Replay lower-lane shifts so means[] matches a cut placed just after
      // this thread's instance.
      for (int ii = threadIdx.x; ii >= 0; --ii) {
        means[0] += s_histograms[ii];
        means[1] -= s_histograms[ii];
      }
      T nLeft = i + threadIdx.x + 1;
      T nRight = numInds - nLeft;
      if (nRight < 1) nRight = 1;
      T diff = ((means[0] / nLeft) - (means[1] / nRight));
      gain = (nLeft * nRight * diff * diff / (nLeft + nRight));
      // Only cut between distinct attribute values.
      if (bestGain < gain && (preVal < val || threadIdx.x + i == 0)) {
        bestGain = gain;
        bestI = i + threadIdx.x;
      }
      means[0] = curr_dist[0];
      means[1] = curr_dist[1];
    }
    __syncthreads();
  }
  // Block-wide argmax reduction over per-thread best gains.
  T* responses = (T*)s_histograms;
  responses[threadIdx.x] = bestGain;
  s_histograms[threadIdx.x + blockDim.x] = bestI;
  __syncthreads();
  for (int i = blockDim.x >> 1; i > 0; i >>= 1) {
    if (threadIdx.x < i) {
      if (responses[i + threadIdx.x] > responses[threadIdx.x]) {
        responses[threadIdx.x] = responses[i + threadIdx.x];
        s_histograms[blockDim.x + threadIdx.x] =
            s_histograms[blockDim.x + threadIdx.x + i];
      }
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    bestI = s_histograms[blockDim.x];
    bestGain = responses[threadIdx.x];
    if (bestI > 0) {
      // Midpoint between the neighbouring attribute values around the cut.
      T pointBeforeSplit = 0.0f, pointAfterSplit = 0.0f;
      int instJustBeforeSplit, instJustAfterSplit;
      instJustBeforeSplit =
          params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                [nodeIndStart + bestI - 1];
      instJustAfterSplit =
          params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                [nodeIndStart + bestI];
      pointBeforeSplit =
          get_data_point(tmp_node.tmp_attribute, instJustBeforeSplit,
                         params->dataset_info->nr_instances, params->dataset);
      pointAfterSplit =
          get_data_point(tmp_node.tmp_attribute, instJustAfterSplit,
                         params->dataset_info->nr_instances, params->dataset);
      tmp_node.tmp_split = (pointAfterSplit + pointBeforeSplit) / 2.0f;
    } else
      tmp_node.tmp_split = 0;
  }
  if (threadIdx.x < 4) curr_dist[threadIdx.x] = 0.0f;
  __syncthreads();
  // Rebuild [countLeft, countRight, sumLeft, sumRight] for the chosen split.
  for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
    inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                 [nodeIndStart + i];
    val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
                         params->dataset);
    if (val != -flt_max) {
      GpuDte<T>::AtomicAdd(&curr_dist[(val < tmp_node.tmp_split) ? 0 : 1],
                           T(1));
      GpuDte<T>::AtomicAdd(&curr_dist[(val < tmp_node.tmp_split) ? 2 : 3], val);
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    // Convert the accumulated sums into per-side means.
    curr_dist[2] = (curr_dist[0] != 0) ? curr_dist[2] / curr_dist[0] : 0;
    curr_dist[3] = (curr_dist[1] != 0) ? curr_dist[3] / curr_dist[1] : 0;
  }
  return bestGain;
}
template GpuRf<float>::GpuRf();
template GpuRf<double>::GpuRf();
} | b9f4d67cc8f5765b81fa8f86133aafc0ca2929a0.cu | #define DLLExport
#define TestExport
#include "../../lib_gpu/include/cuda_kernel_helpers.hpp"
#include "gpurf.h"
namespace lib_ensembles {
template <typename T>
__global__ void host_kernel(GpuRf<T>* gpu_algo,
                            GpuDteAlgorithmShared::GpuParams<T> params,
                            GpuDteAlgorithmShared::GpuDteKernelId type) {
  // Single device-side dispatch point: publish the params sub-structure
  // pointers to the algorithm object, then forward to the member kernel
  // matching the requested id. Unknown ids are a no-op.
  gpu_algo->GetConstPointers(&params.iteration_info, &params.dataset_info,
                             &params.static_info);
  if (type == GpuDteAlgorithmShared::kSetupKernel) {
    gpu_algo->gpurf_setup_kernel(&params);
  } else if (type == GpuDteAlgorithmShared::kInitTreeBatch) {
    gpu_algo->gpurf_initialize_tree_batch(&params);
  } else if (type == GpuDteAlgorithmShared::kFindSplit) {
    gpu_algo->gpurf_find_split(&params);
  } else if (type == GpuDteAlgorithmShared::kPerformSplit) {
    gpu_algo->gpurf_perform_split(&params);
  } else if (type == GpuDteAlgorithmShared::kPredict) {
    gpu_algo->gpurf_predict(&params);
  } else if (type == GpuDteAlgorithmShared::kOobEstimate) {
    gpu_algo->gpurf_oob_estimate(&params);
  } else if (type == GpuDteAlgorithmShared::kFeatureImp) {
    gpu_algo->gpurf_feature_importance(&params);
  }
}
// Host-side entry point: launches the device dispatcher kernel with the
// requested grid/block configuration.
// NOTE(review): no cudaGetLastError() check after the launch --
// launch-configuration errors will surface silently at a later sync.
template <typename T>
void GpuRf<T>::CallCudaKernel(int blocks, int block_size,
                              GpuDteAlgorithmShared::GpuParams<T>& params,
                              GpuDteAlgorithmShared::GpuDteKernelId id) {
  host_kernel<T><<<blocks, block_size>>>(this, params, id);
}
template <typename T>
__device__ void GpuRf<T>::gpurf_setup_kernel(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  // Seed one RNG state per launched thread: same seed for all, per-thread
  // sequence number, zero offset -- yields independent random streams.
  const int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
  if (thread_id >= params->iteration_info->threads_launched) return;
  curand_init(324123, thread_id, 0, &params->random_states[thread_id]);
}
// Builds the per-tree bootstrap sample (bag) for a batch of trees; one block
// per tree. Balanced classification sampling draws an equal number of
// instances per class (using the per-class ranges in target_starts); the
// default path draws nr_instances uniform samples with replacement. Thread 0
// finally writes the root node header covering the sampled index range.
template <typename T>
__device__ void GpuRf<T>::gpurf_initialize_tree_batch(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  __shared__ int s_indexCursor;
  if (threadIdx.x == 0) {
    // Balanced sampling fills the bag incrementally (cursor counts up);
    // otherwise the bag size is fixed at nr_instances.
    if (params->static_info->balanced_sampling &&
        params->dataset_info->data_type == type_classification_)
      s_indexCursor = 0;
    else
      s_indexCursor = params->dataset_info->nr_instances;
  }
  __syncthreads();
  int treeOffset = params->dataset_info->nr_instances * blockIdx.x;
  if (params->dataset_info->data_type == type_classification_ &&
      params->static_info->balanced_sampling) {
    // Initialize probability main buffer
    int localCursor;
    int randVal;
    int stateId = (blockIdx.x * blockDim.x + threadIdx.x) %
                  params->static_info->node_buffer_size;
    curandStateMRG32k3a localState = params->random_states[stateId];
    for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
      // [targetStart, targetEnd] delimits the instances of class i.
      int targetStart = params->target_starts[i];
      int targetEnd = (i == params->dataset_info->nr_target_values - 1)
                          ? params->dataset_info->nr_instances - 1
                          : params->target_starts[i + 1] - 1;
      for (int ii = threadIdx.x;
           ii < params->dataset_info->nr_instances /
                    params->dataset_info->nr_target_values;
           ii += blockDim.x) {
        localCursor = GpuDte<T>::AtomicAdd(&s_indexCursor, 1);
        if (targetEnd - targetStart > 0)
          randVal =
              targetStart + curand(&localState) % (targetEnd - targetStart);
        else
          randVal = targetStart;
        params->indices_buffer[0][treeOffset + localCursor] = randVal;
        params->indices_inbag[treeOffset + randVal] = true;
      }
    }
  } else {
    // Initialize indices main buffer
    int randVal;
    int stateId = (blockIdx.x * blockDim.x + threadIdx.x) %
                  params->static_info->node_buffer_size;
    curandStateMRG32k3a localState = params->random_states[stateId];
    for (int i = threadIdx.x; i < params->dataset_info->nr_instances;
         i += blockDim.x) {
      // Uniform bootstrap sampling with replacement.
      randVal = curand(&localState) % params->dataset_info->nr_instances;
      params->indices_buffer[0][treeOffset + i] = randVal;
      params->indices_inbag[treeOffset + randVal] = true;
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    // Root node spans all sampled indices of this tree.
    GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> root;
    root.parent_id = -2;
    root.attribute = -2;
    root.split_point = -2;
    root.tracking_id = GpuDte<T>::AtomicAdd(&params->node_cursors[node_id_], 1);
    root.node_index_start = treeOffset;
    root.node_index_count = s_indexCursor;
    params->node_buffers[params->iteration_info->read_buffer_id][blockIdx.x] =
        root;
  }
}
// Per-node split search; one block per node. Repeatedly draws a random
// candidate attribute, sorts the node's instances on it, scores the best cut
// (information gain for classification, variance-style gain for regression),
// and keeps the highest-scoring attribute/split. Tries nr_features candidates
// plus a bounded number of retries while no sensible split has been found.
template <typename T>
__device__ void GpuRf<T>::gpurf_find_split(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  __shared__ T s_dynamic_shared[40];
  __shared__ unsigned int s_histograms[1024];
  __shared__ unsigned int s_offsets[256];
  __shared__ GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T> s_tree_node;
  __shared__ GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T> s_tmp_node;
  __shared__ int s_attribute_type;
  __shared__ bool s_sensible_split;
  curandStateMRG32k3a localState;
  if (threadIdx.x == 0) {
    localState = params->random_states[blockIdx.x];
    s_tree_node =
        params->node_buffers[params->iteration_info->read_buffer_id]
                            [blockIdx.x + params->iteration_info->node_offset];
    s_tmp_node =
        params
            ->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset];
    s_tmp_node.tmp_score = 0;
    s_sensible_split = false;
    for (int i = 0; i < params->dataset_info->nr_target_values * max_nominal_;
         ++i)
      s_dynamic_shared[i] = 0;
  }
  __syncthreads();
  // Find prior distribution
  for (int i = threadIdx.x; i < s_tree_node.node_index_count; i += blockDim.x) {
    int inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                                     [s_tree_node.node_index_start + i];
    switch (params->dataset_info->data_type) {
      case type_classification_:
        GpuDte<T>::AtomicAdd(&s_dynamic_shared[int(params->target_data[inst])],
                             T(1));
        break;
      case type_regression_:
        GpuDte<T>::AtomicAdd(&s_dynamic_shared[0], params->target_data[inst]);
        break;
    }
  }
  __syncthreads();
  bool firstFeature = true;
  int k = params->static_info->nr_features;
  // Allow up to 10 extra retries (k may go negative) when no candidate has
  // produced a sensible split yet.
  int max_retries = k - params->dataset_info->nr_attributes < -10
                        ? -10
                        : k - params->dataset_info->nr_attributes;
  while ((k > max_retries) && (k-- > 0 || !s_sensible_split)) {
    if (threadIdx.x == 0) {
      // Draw the next random candidate attribute.
      s_tmp_node.tmp_attribute =
          curand(&localState) % params->dataset_info->nr_attributes;
      s_attribute_type = params->attribute_type[s_tmp_node.tmp_attribute];
      s_attribute_type =
          s_attribute_type >= max_nominal_ ? 2 : s_attribute_type;
    }
    __syncthreads();
    // Sort indices on attribute
    radix_sort_on_attribute(params, s_tree_node, s_tmp_node, s_histograms,
                            s_offsets);
    __syncthreads();
    T response;
    switch (params->dataset_info->data_type) {
      case type_classification_:
        response = eval_numeric_attribute(params, s_tree_node, s_tmp_node,
                                          s_dynamic_shared, s_attribute_type,
                                          s_histograms, s_offsets);
        break;
      case type_regression_:
        response =
            GpuRf::variance_calculation(params, s_tree_node, s_tmp_node,
                                        s_dynamic_shared, (T*)s_histograms);
        break;
    }
    if (threadIdx.x == 0) {
      if (s_tmp_node.tmp_score < response || firstFeature) {
        // Save splitpoint, attribute and distribution
        s_tmp_node.tmp_score = response;
        s_tree_node.split_point = s_tmp_node.tmp_split;
        s_tree_node.attribute = s_tmp_node.tmp_attribute;
        switch (params->dataset_info->data_type) {
          case type_classification_:
            for (int i = 0;
                 i < params->dataset_info->nr_target_values * max_nominal_; ++i)
              params->probability_tmp_buffer
                  [blockIdx.x * params->dataset_info->nr_target_values *
                       max_nominal_ +
                   i] = s_dynamic_shared[i];
            break;
          case type_regression_:
            params->probability_tmp_buffer
                [blockIdx.x * params->dataset_info->nr_target_values *
                 max_nominal_] = s_dynamic_shared[2];
            params->probability_tmp_buffer
                [blockIdx.x * params->dataset_info->nr_target_values *
                     max_nominal_ +
                 1] = s_dynamic_shared[3];
            break;
        }
      }
      // Any non-trivial gain counts as a sensible split.
      if (s_tmp_node.tmp_score > 1e-3) s_sensible_split = true;
      firstFeature = false;
    }
    __syncthreads();
  }
  // Copy back result
  if (threadIdx.x == 0) {
    params->node_buffers[params->iteration_info->read_buffer_id]
                        [blockIdx.x + params->iteration_info->node_offset] =
        s_tree_node;
    params->node_tmp_buffer[blockIdx.x + params->iteration_info->node_offset] =
        s_tmp_node;
    params->random_states[blockIdx.x] = localState;
  }
}
// Thin wrapper: delegates the actual child-node creation and index
// repartitioning to the shared gpudte_perform_split helper, providing a
// block-local scratch array for the per-child counts.
template <typename T>
__device__ void GpuRf<T>::gpurf_perform_split(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  __shared__ int s_node_counts[40];
  gpudte_perform_split(*(params->static_info), *(params->dataset_info),
                       *(params->iteration_info), params->probability_buffers,
                       params->probability_tmp_buffer, params->dataset,
                       params->attribute_type, s_node_counts,
                       params->indices_buffer, params->node_cursors,
                       params->node_buffers);
}
template <typename T>
__device__ void GpuRf<T>::gpurf_predict(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  // One launched thread classifies one (tree, instance) work item; threads
  // beyond the launch budget exit immediately.
  const int global_id = threadIdx.x + blockIdx.x * blockDim.x;
  if (global_id >= params->iteration_info->threads_launched) return;

  const int work_id = params->iteration_info->tree_offset + global_id;
  gpudte_predict(work_id, params->dataset_info->nr_instances,
                 params->dataset_info->data_type,
                 params->dataset_info->nr_target_values,
                 params->node_buffers_classify, params->dataset,
                 params->probability_tmp_buffer, params->predictions,
                 params->attribute_type);
}
// Out-of-bag (OOB) error estimation. One thread handles one (tree, instance)
// pair; instances that were sampled into the tree's bag are skipped, the rest
// are pushed down the finished tree and the prediction is compared against
// the ground truth (classification) or accumulated into mse (regression).
template <typename T>
__device__ void GpuRf<T>::gpurf_oob_estimate(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  if (threadIdx.x + blockIdx.x * blockDim.x >=
      params->iteration_info->threads_launched)
    return;
  int tid = params->iteration_info->tree_offset + threadIdx.x +
            blockIdx.x * blockDim.x;
  // tid encodes (tree, instance) as treeId * nr_instances + instanceId.
  int instanceId = tid % params->dataset_info->nr_instances;
  // Fix: exact integer division. The previous code computed
  // T(tid) / T(nr_instances) and truncated, which can misround for large tid
  // when T is float (24-bit mantissa).
  int treeId = tid / params->dataset_info->nr_instances;
  bool inBag =
      params->indices_inbag[params->dataset_info->nr_instances * treeId +
                            instanceId];
  if (inBag) return;
  T dataPoint;
  // Walk from the root until a leaf (child_count == 0) is reached.
  GpuDteAlgorithmShared::gpuDTE_NodeHeader_Classify<T> tree_node =
      params->node_buffers_classify[treeId];
  while (tree_node.child_count != 0) {
    int attribute_type = params->attribute_type[tree_node.attribute];
    dataPoint =
        get_data_point(tree_node.attribute, instanceId,
                       params->dataset_info->nr_instances, params->dataset);
    if (attribute_type > max_nominal_ ||
        params->dataset_info->data_type == type_regression_) {
      // Numeric split: left child if below the split point; missing values
      // (-flt_max sentinel) always go left.
      if (dataPoint != -flt_max)
        tree_node =
            (dataPoint < tree_node.split_point)
                ? params->node_buffers_classify[tree_node.child_start]
                : params->node_buffers_classify[tree_node.child_start + 1];
      else
        tree_node = params->node_buffers_classify[tree_node.child_start];
    } else {
      // Nominal split: the attribute value indexes the child directly.
      if (dataPoint != -flt_max)
        tree_node =
            params
                ->node_buffers_classify[tree_node.child_start + int(dataPoint)];
      else
        tree_node = params->node_buffers_classify[tree_node.child_start];
    }
  }
  switch (params->dataset_info->data_type) {
    case type_classification_: {
      // Pick the majority class of the leaf's distribution.
      int classSelector = 0;
      T max_prob = 0;
      for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
        if (params->probability_tmp_buffer[tree_node.probability_start + i] >
            max_prob) {
          max_prob =
              params->probability_tmp_buffer[tree_node.probability_start + i];
          classSelector = i;
        }
      }
      // oobCounts[0] = correct OOB votes, oobCounts[1] = incorrect ones.
      if (params->target_data[instanceId] == classSelector)
        GpuDte<T>::AtomicAdd(&params->oobCounts[0], 1);
      else
        GpuDte<T>::AtomicAdd(&params->oobCounts[1], 1);
      break;
    }
    case type_regression_:
      GpuDte<T>::AtomicAdd(
          &params->mse[0],
          params->probability_tmp_buffer[tree_node.probability_start]);
      break;
  }
}
// Permutation-based feature importance. Identical traversal to
// gpurf_oob_estimate, except that whenever a node tests the attribute under
// evaluation (carried in iteration_info->depth), the instance's value is
// replaced by that of a randomly drawn instance. The resulting per-attribute
// hit/miss counters measure how much the attribute matters.
template <typename T>
__device__ void GpuRf<T>::gpurf_feature_importance(
    GpuDteAlgorithmShared::GpuParams<T>* params) {
  if (threadIdx.x + blockIdx.x * blockDim.x >=
      params->iteration_info->threads_launched)
    return;
  int randAttribute = params->iteration_info->depth;
  int tid = params->iteration_info->tree_offset + threadIdx.x +
            blockIdx.x * blockDim.x;
  int instanceId = tid % params->dataset_info->nr_instances;
  // Fix: exact integer division instead of the original float-mediated
  // T(tid) / T(nr_instances), which can misround for large tid when T is
  // float (24-bit mantissa).
  int treeId = tid / params->dataset_info->nr_instances;
  bool inBag =
      params->indices_inbag[params->dataset_info->nr_instances * treeId +
                            instanceId];
  if (inBag) return;
  int instance;
  curandStateMRG32k3a localState = params->random_states[blockIdx.x];
  T dataPoint;
  GpuDteAlgorithmShared::gpuDTE_NodeHeader_Classify<T> tree_node =
      params->node_buffers_classify[treeId];
  while (tree_node.child_count != 0) {
    int attribute_type = params->attribute_type[tree_node.attribute];
    // Permute only the attribute under evaluation.
    if (randAttribute == tree_node.attribute)
      instance = curand(&localState) % params->dataset_info->nr_instances;
    else
      instance = instanceId;
    dataPoint =
        get_data_point(tree_node.attribute, instance,
                       params->dataset_info->nr_instances, params->dataset);
    if (attribute_type > max_nominal_ ||
        params->dataset_info->data_type == type_regression_) {
      // Numeric split; missing values (-flt_max) fall to the left child.
      if (dataPoint != -flt_max)
        tree_node =
            (dataPoint < tree_node.split_point)
                ? params->node_buffers_classify[tree_node.child_start]
                : params->node_buffers_classify[tree_node.child_start + 1];
      else
        tree_node = params->node_buffers_classify[tree_node.child_start];
    } else {
      // Nominal split: value indexes the child.
      if (dataPoint != -flt_max)
        tree_node =
            params
                ->node_buffers_classify[tree_node.child_start + int(dataPoint)];
      else
        tree_node = params->node_buffers_classify[tree_node.child_start];
    }
  }
  switch (params->dataset_info->data_type) {
    case type_classification_: {
      int classSelector = 0;
      T max_prob = 0;
      for (int i = 0; i < params->dataset_info->nr_target_values; ++i) {
        if (params->probability_tmp_buffer[tree_node.probability_start + i] >
            max_prob) {
          max_prob =
              params->probability_tmp_buffer[tree_node.probability_start + i];
          classSelector = i;
        }
      }
      // Per-attribute [correct, incorrect] counter pair.
      if (params->target_data[instanceId] == classSelector)
        GpuDte<T>::AtomicAdd(&params->oobCounts[randAttribute * 2], 1);
      else
        GpuDte<T>::AtomicAdd(&params->oobCounts[randAttribute * 2 + 1], 1);
      break;
    }
    case type_regression_:
      GpuDte<T>::AtomicAdd(
          &params->mse[randAttribute],
          params->probability_tmp_buffer[tree_node.probability_start]);
      break;
  }
}
// Block-cooperative LSB radix sort of one node's instance indices, keyed on
// the raw 32-bit pattern of the chosen attribute's values. Four 8-bit passes
// (LSB first); the final pass special-cases IEEE-754 negative values, whose
// set sign bit would otherwise order them after the positives.
// Assumes blockDim.x <= 64 (s_thread_radix capacity) -- TODO confirm.
template <typename T>
__device__ void GpuRf<T>::radix_sort_on_attribute(
    GpuDteAlgorithmShared::GpuParams<T>* params,
    GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T>& node,
    GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T>& tmp_node,
    unsigned int s_histograms[1024], unsigned int s_offsets[256]) {
  __shared__ unsigned int s_nrNegativeValues;
  __shared__ unsigned char s_thread_radix[64];
  if (threadIdx.x == 0) s_nrNegativeValues = 0;
  // Reinterpret the attribute column as raw 32-bit patterns.
  unsigned int* input =
      (unsigned int*)&params->dataset[tmp_node.tmp_attribute *
                                      params->dataset_info->nr_instances];
  // Ping-pong index buffers selected by the iteration's tick_tock flag.
  unsigned int* indices =
      (unsigned int*)&params
          ->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                          [node.node_index_start];
  unsigned int* indices2 =
      (unsigned int*)&params
          ->indices_buffer[params->iteration_info->tick_tock ? 1 : 0]
                          [node.node_index_start];
  for (int i = threadIdx.x; i < 1024; i += blockDim.x) s_histograms[i] = 0;
  __syncthreads();
  // Build all four 256-bin byte histograms in a single sweep.
  unsigned char* dataVal;
  for (int i = threadIdx.x; i < node.node_index_count; i += blockDim.x) {
    dataVal = (unsigned char*)&input[indices[i]];
    GpuDte<T>::AtomicAdd(&s_histograms[*dataVal], 1);
    GpuDte<T>::AtomicAdd(&s_histograms[256 + (*(dataVal + 1))], 1);
    GpuDte<T>::AtomicAdd(&s_histograms[512 + (*(dataVal + 2))], 1);
    GpuDte<T>::AtomicAdd(&s_histograms[768 + (*(dataVal + 3))], 1);
  }
  __syncthreads();
  // MSB-histogram bins >= 128 correspond to values with the sign bit set.
  for (int i = threadIdx.x + 128; i < 256; i += blockDim.x)
    GpuDte<T>::AtomicAdd(&s_nrNegativeValues, s_histograms[768 + i]);
  // Radix sort, j is the pass number (0=LSB, 3=MSB)
  bool performPass;
  unsigned int* curCount;
  unsigned char uniqueVal;
  for (int j = 0; j < 4; j++) {
    __syncthreads();
    performPass = true;
    curCount = &s_histograms[j << 8];
    // If every key shares the same byte at this position the pass is a no-op.
    uniqueVal = *(((unsigned char*)&input[indices[0]]) + j);
    if (curCount[uniqueVal] == node.node_index_count) performPass = false;
    // Should we care about negative values?
    if (j != 3) {
      // Here we deal with positive values only
      if (performPass) {
        // Create offsets
        lib_gpu::CudaKernelHelpers::inplace_prefixsum(curCount, 256);
        // Perform Radix Sort
        bool skip;
        unsigned int id, spot;
        unsigned char radix;
        for (int i = 0; i < node.node_index_count; i += blockDim.x) {
          skip = threadIdx.x + i >= node.node_index_count;
          if (!skip) {
            id = indices[threadIdx.x + i];
            radix = *(((unsigned char*)&input[id]) + j);
            s_thread_radix[threadIdx.x] = radix;
            spot = curCount[radix];
          }
          __syncthreads();
          if (!skip) {
            GpuDte<T>::AtomicAdd(&curCount[radix], 1);
            // Stability tie-break: count lower-lane threads placing the same
            // radix this round so each thread lands in a distinct slot.
            for (int ii = threadIdx.x; ii > 0; --ii)
              if (s_thread_radix[ii - 1] == radix) ++spot;
            indices2[spot] = id;
          }
          __syncthreads();
        }
        unsigned int* Tmp = indices;
        indices = indices2;
        indices2 = Tmp;
      }
    } else {
      // This is a special case to correctly handle negative values
      if (performPass) {
        // Create biased offsets, in order for negative numbers to be sorted as well
#pragma unroll
        for (int i = threadIdx.x; i < 256; i += blockDim.x)
          s_offsets[i] = curCount[i];
        __syncthreads();
        lib_gpu::CudaKernelHelpers::inplace_prefixsum(s_offsets, 128);
        if (threadIdx.x == 0) s_offsets[0] = s_nrNegativeValues;
        lib_gpu::CudaKernelHelpers::inplace_reverse_prefixsum(&s_offsets[128],
                                                              128);
        // Fixing the wrong place for negative values
#pragma unroll
        for (int i = threadIdx.x + 128; i < 256; i += blockDim.x)
          s_offsets[i] += curCount[i];
        __syncthreads();
        bool skip;
        int spot;
        unsigned int id;
        unsigned char radix;
        for (int i = 0; i < node.node_index_count; i += blockDim.x) {
          skip = threadIdx.x + i >= node.node_index_count;
          if (!skip) {
            id = indices[threadIdx.x + i];
            radix = input[id] >> 24;
            s_thread_radix[threadIdx.x] = radix;
            // Negative keys (radix >= 128) are filled backwards so the most
            // negative value ends up first.
            if (radix < 128)
              spot = s_offsets[radix];
            else
              spot = s_offsets[radix] - 1;
          }
          __syncthreads();
          if (!skip) {
            if (radix < 128)
              GpuDte<T>::AtomicAdd((int*)&s_offsets[radix], 1);
            else
              GpuDte<T>::AtomicAdd((int*)&s_offsets[radix], -1);
            for (int ii = threadIdx.x; ii > 0; --ii)
              if (s_thread_radix[ii - 1] == radix) spot += radix < 128 ? 1 : -1;
            indices2[spot] = id;
          }
          __syncthreads();
        }
        unsigned int* Tmp = indices;
        indices = indices2;
        indices2 = Tmp;
      } else {
        // The pass is useless, yet we still have to reverse the order of
        // current list if all values are negative.
        if (uniqueVal >= 128) {
          for (unsigned int i = threadIdx.x; i < node.node_index_count;
               i += blockDim.x)
            indices2[i] = indices[node.node_index_count - i - 1];
          // Swap pointers for next pass. Valid indices - the most recent ones -
          // are in mIndices after the swap.
          unsigned int* Tmp = indices;
          indices = indices2;
          indices2 = Tmp;
        }
      }
    }
  }
  __syncthreads();
  // Need to copy back to the correct indices buffer
  if (indices !=
      (unsigned int*)&params
          ->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
                          [node.node_index_start]) {
    for (int i = threadIdx.x; i < node.node_index_count; i += blockDim.x)
      indices2[i] = indices[i];
  }
}
/*
 * eval_numeric_attribute cooperatively evaluates candidate split points on a
 * numeric attribute for one tree node and returns the best entropy-gain
 * response found.  The block's threads walk the node's instances (appears to
 * assume they are ordered on the attribute — the preVal < val guard below only
 * accepts a split between two strictly increasing values; TODO confirm),
 * maintain running left/right class distributions, reduce per-thread best
 * responses to a block-wide maximum, and thread 0 writes the winning split
 * threshold (midpoint of the straddling values) into tmp_node.tmp_split.
 * Finally curr_dist is rebuilt as the class distribution on each side of the
 * chosen split.  After the reduction the returned bestResponse is only
 * meaningful in thread 0; other threads return their local value.
 */
template <typename T>
__device__ T GpuRf<T>::eval_numeric_attribute(
GpuDteAlgorithmShared::GpuParams<T>* params,
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T>& node,
GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T>& tmp_node, T* curr_dist,
int att_type, unsigned int* s_histograms, unsigned int* s_offsets) {
__shared__ T local_dist[40];
int numInds = node.node_index_count;
int nodeIndStart = node.node_index_start;
int inst, bestI = 0;
T val, preVal;
T response, bestResponse = 0.0f;
// Seed the running distribution: the "right" half (offset nr_target_values)
// starts empty and the "left" half holds the node's full class counts.
for (int i = 0; i < params->dataset_info->nr_target_values; ++i)
local_dist[i + params->dataset_info->nr_target_values] = 0;
for (int i = 0; i < params->dataset_info->nr_target_values; ++i)
local_dist[i] =
curr_dist[i] + curr_dist[i + params->dataset_info->nr_target_values];
s_offsets[threadIdx.x] = 0;
// Entropy of the unsplit node; gains below are measured against this.
T prior = entropy_over_columns((T*)curr_dist, att_type,
params->dataset_info->nr_target_values);
__syncthreads();
for (int i = threadIdx.x;
i < params->dataset_info->nr_target_values * max_nominal_;
i += blockDim.x)
curr_dist[i] = local_dist[i];
__syncthreads();
// Find best split on attribute
bool skip;
for (int i = 0; i < numInds; i += blockDim.x) {
skip = threadIdx.x + i >= node.node_index_count;
if (!skip) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i + threadIdx.x];
val = get_data_point(tmp_node.tmp_attribute, inst,
params->dataset_info->nr_instances, params->dataset);
// Stash each thread's target class so lower-indexed threads' moves can
// be replayed serially below.
s_offsets[threadIdx.x] = int(params->target_data[inst]);
if (i + threadIdx.x != 0) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i + threadIdx.x - 1];
preVal =
get_data_point(tmp_node.tmp_attribute, inst,
params->dataset_info->nr_instances, params->dataset);
}
// Move this thread's instance from the left to the right side of the
// shared distribution.
GpuDte<T>::AtomicAdd(&curr_dist[params->dataset_info->nr_target_values +
s_offsets[threadIdx.x]],
T(1));
GpuDte<T>::AtomicAdd(&curr_dist[s_offsets[threadIdx.x]], T(-1));
}
__syncthreads();
if (!skip) {
// Serial prefix: fold in the moves of all threads with index <= mine so
// local_dist reflects a split placed just after this thread's instance.
for (int ii = threadIdx.x; ii >= 0; --ii) {
++local_dist[params->dataset_info->nr_target_values + s_offsets[ii]];
--local_dist[s_offsets[ii]];
}
response = prior - entropy_conditioned_on_rows(
local_dist, att_type,
params->dataset_info->nr_target_values);
// Only accept a split between two distinct attribute values (or at the
// very first position).
if (bestResponse < response && (preVal < val || threadIdx.x + i == 0)) {
bestResponse = response;
bestI = i + threadIdx.x;
}
for (int ii = 0;
ii < params->dataset_info->nr_target_values * max_nominal_; ++ii)
local_dist[ii] = curr_dist[ii];
}
__syncthreads();
}
// Block-wide max reduction of per-thread best responses; the matching split
// index rides along in s_offsets[blockDim.x ..].
T* responses = (T*)s_offsets;
responses[threadIdx.x] = bestResponse;
s_offsets[threadIdx.x + blockDim.x] = bestI;
for (int i = threadIdx.x;
i < params->dataset_info->nr_target_values * max_nominal_;
i += blockDim.x)
curr_dist[i] = 0;
__syncthreads();
for (int i = blockDim.x >> 1; i > 0; i >>= 1) {
if (threadIdx.x < i) {
if (responses[i + threadIdx.x] > responses[threadIdx.x]) {
responses[threadIdx.x] = responses[i + threadIdx.x];
s_offsets[blockDim.x + threadIdx.x] =
s_offsets[blockDim.x + threadIdx.x + i];
}
}
__syncthreads();
}
// Thread 0 turns the winning index into a threshold: the midpoint of the
// attribute values just before and just after the split.
if (threadIdx.x == 0) {
bestI = s_offsets[blockDim.x];
bestResponse = responses[threadIdx.x];
if (bestI > 0) {
T pointBeforeSplit = 0.0f, pointAfterSplit = 0.0f;
int instJustBeforeSplit, instJustAfterSplit;
instJustBeforeSplit =
params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + bestI - 1];
instJustAfterSplit =
params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + bestI];
pointBeforeSplit =
get_data_point(tmp_node.tmp_attribute, instJustBeforeSplit,
params->dataset_info->nr_instances, params->dataset);
pointAfterSplit =
get_data_point(tmp_node.tmp_attribute, instJustAfterSplit,
params->dataset_info->nr_instances, params->dataset);
tmp_node.tmp_split = (pointAfterSplit + pointBeforeSplit) / 2.0f;
} else
tmp_node.tmp_split = 0;
}
__syncthreads();
// Assemble new distribution
// Rebuild curr_dist against the chosen threshold; -flt_max marks a missing
// value, which is counted on the left side without the per-side offset.
for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i];
val = get_data_point(tmp_node.tmp_attribute, inst,
params->dataset_info->nr_instances, params->dataset);
if (val != -flt_max)
GpuDte<T>::AtomicAdd(&curr_dist[params->dataset_info->nr_target_values *
((val < tmp_node.tmp_split) ? 0 : 1) +
int(params->target_data[inst])],
T(1));
else
GpuDte<T>::AtomicAdd(&curr_dist[int(params->target_data[inst])], T(1));
}
return bestResponse;
}
/*
 * variance_calculation is the regression analogue of the numeric split
 * evaluation: for one node it scans candidate split positions on the
 * attribute, scoring each as nLeft*nRight*(meanLeft-meanRight)^2/(nLeft+nRight)
 * and keeping the best.  The block reduces per-thread best gains to a
 * block-wide maximum, thread 0 writes the winning threshold into
 * tmp_node.tmp_split, and curr_dist is left holding {countLeft, countRight,
 * meanLeft, meanRight}.  After the reduction the returned bestGain is only
 * meaningful in thread 0.
 */
template <typename T>
__device__ T GpuRf<T>::variance_calculation(
GpuDteAlgorithmShared::GpuParams<T>* params,
GpuDteAlgorithmShared::gpuDTE_NodeHeader_Train<T>& node,
GpuDteAlgorithmShared::gpuDTE_TmpNodeValues<T>& tmp_node, T* curr_dist,
T* s_histograms) {
int numInds = node.node_index_count;
int nodeIndStart = node.node_index_start;
int attribute = tmp_node.tmp_attribute;
int inst, bestI = 0;
T val, means[2], bestGain = 0.0f, preVal;
means[0] = 0;
// NOTE(review): only curr_dist[0] is zeroed here, while the sum below
// accumulates into curr_dist[1] — this appears to rely on the caller having
// cleared curr_dist[1]; confirm.
if (threadIdx.x == 0) curr_dist[0] = 0.0f;
// Calculate mean values from split
// Sum all (non-missing) target values into curr_dist[1]; -flt_max marks a
// missing attribute value.
for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i];
val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
params->dataset);
if (val != -flt_max)
GpuDte<T>::AtomicAdd(&curr_dist[1], params->target_data[inst]);
}
__syncthreads();
// means[0]/means[1] track the left/right running sums of attribute values.
means[1] = curr_dist[1];
T gain = 0;
bool skip;
for (int i = 0; i < numInds; i += blockDim.x) {
skip = threadIdx.x + i >= numInds;
if (!skip) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i + threadIdx.x];
val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
params->dataset);
// Stash each thread's value so lower-indexed threads' moves can be
// replayed serially below.
s_histograms[threadIdx.x] = val;
if (i + threadIdx.x != 0) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i + threadIdx.x - 1];
preVal =
get_data_point(tmp_node.tmp_attribute, inst,
params->dataset_info->nr_instances, params->dataset);
}
// Shift this thread's value from the right sum to the left sum.
GpuDte<T>::AtomicAdd(&curr_dist[0], val);
GpuDte<T>::AtomicAdd(&curr_dist[1], -val);
}
__syncthreads();
if (!skip) {
// Serial prefix over threads <= mine so means reflect a split placed
// just after this thread's instance.
for (int ii = threadIdx.x; ii >= 0; --ii) {
means[0] += s_histograms[ii];
means[1] -= s_histograms[ii];
}
T nLeft = i + threadIdx.x + 1;
T nRight = numInds - nLeft;
if (nRight < 1) nRight = 1;
T diff = ((means[0] / nLeft) - (means[1] / nRight));
gain = (nLeft * nRight * diff * diff / (nLeft + nRight));
// Only accept a split between two strictly increasing values (or at the
// very first position).
if (bestGain < gain && (preVal < val || threadIdx.x + i == 0)) {
bestGain = gain;
bestI = i + threadIdx.x;
}
means[0] = curr_dist[0];
means[1] = curr_dist[1];
}
__syncthreads();
}
// Block-wide max reduction of per-thread best gains; the matching index
// rides along in s_histograms[blockDim.x ..].
T* responses = (T*)s_histograms;
responses[threadIdx.x] = bestGain;
s_histograms[threadIdx.x + blockDim.x] = bestI;
__syncthreads();
for (int i = blockDim.x >> 1; i > 0; i >>= 1) {
if (threadIdx.x < i) {
if (responses[i + threadIdx.x] > responses[threadIdx.x]) {
responses[threadIdx.x] = responses[i + threadIdx.x];
s_histograms[blockDim.x + threadIdx.x] =
s_histograms[blockDim.x + threadIdx.x + i];
}
}
__syncthreads();
}
// Thread 0 turns the winning index into a threshold: the midpoint of the
// attribute values just before and just after the split.
if (threadIdx.x == 0) {
bestI = s_histograms[blockDim.x];
bestGain = responses[threadIdx.x];
if (bestI > 0) {
T pointBeforeSplit = 0.0f, pointAfterSplit = 0.0f;
int instJustBeforeSplit, instJustAfterSplit;
instJustBeforeSplit =
params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + bestI - 1];
instJustAfterSplit =
params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + bestI];
pointBeforeSplit =
get_data_point(tmp_node.tmp_attribute, instJustBeforeSplit,
params->dataset_info->nr_instances, params->dataset);
pointAfterSplit =
get_data_point(tmp_node.tmp_attribute, instJustAfterSplit,
params->dataset_info->nr_instances, params->dataset);
tmp_node.tmp_split = (pointAfterSplit + pointBeforeSplit) / 2.0f;
} else
tmp_node.tmp_split = 0;
}
// Recompute the per-side counts (slots 0,1) and value sums (slots 2,3)
// against the chosen threshold, then normalize the sums into means.
if (threadIdx.x < 4) curr_dist[threadIdx.x] = 0.0f;
__syncthreads();
for (int i = threadIdx.x; i < numInds; i += blockDim.x) {
inst = params->indices_buffer[params->iteration_info->tick_tock ? 0 : 1]
[nodeIndStart + i];
val = get_data_point(attribute, inst, params->dataset_info->nr_instances,
params->dataset);
if (val != -flt_max) {
GpuDte<T>::AtomicAdd(&curr_dist[(val < tmp_node.tmp_split) ? 0 : 1],
T(1));
GpuDte<T>::AtomicAdd(&curr_dist[(val < tmp_node.tmp_split) ? 2 : 3], val);
}
}
__syncthreads();
if (threadIdx.x == 0) {
curr_dist[2] = (curr_dist[0] != 0) ? curr_dist[2] / curr_dist[0] : 0;
curr_dist[3] = (curr_dist[1] != 0) ? curr_dist[3] / curr_dist[1] : 0;
}
return bestGain;
}
template GpuRf<float>::GpuRf();
template GpuRf<double>::GpuRf();
} |
64ecaab7efdaa4d1158ad37c8c3c16474a2081d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_complex.h>
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#define N 400 //
#define tmax 10 //
float mu=100;
float const w = 5;
float h=(float) 2/N;// ;
void KMatr(int n, cuFloatComplex** M);
cuFloatComplex prm (float x);
cuFloatComplex sigm(float x);
float gf(float m);
float cf(float x);
void MMatr(int n, cuFloatComplex** M);
void FMatr(int n, cuFloatComplex* M);
int solve(int n, cuFloatComplex *top, cuFloatComplex *mid, cuFloatComplex *bot, cuFloatComplex *b, cuFloatComplex *x);
/*
 * Assembles a 1-D frequency-domain (Helmholtz-type) finite-element system:
 * stiffness K, mass M and load F over N cells, forms A = -w^2*M + K,
 * extracts A's tridiagonal bands, pads the system with identity rows up to
 * n = 2^ceil(log2(N-1)) + 1, and hands it to solve() (currently a stub).
 * NOTE(review): malloc results are unchecked and nothing is freed — benign
 * for a one-shot main, but worth tightening.
 */
int main( int argc, char * argv [] ){
int k,i,n,j;
// Round N-1 up to the next power of two; n is the padded system size.
k=(int)pow(2,(int)ceil(log(N-1)/log(2)));
n=k+1;
cuFloatComplex **K, **M, *F, **A;
K = (cuFloatComplex**)malloc((n) * sizeof(cuFloatComplex*));
M = (cuFloatComplex**)malloc((n) * sizeof(cuFloatComplex*));
F = (cuFloatComplex*) malloc((n) * sizeof(cuFloatComplex)) ;
A = (cuFloatComplex**)malloc((n) * sizeof(cuFloatComplex*));
for (i=0;i<n;i++){
K[i]=(cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
M[i]=(cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
A[i]=(cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
}
// Assemble the unscaled matrices and the load vector.
KMatr(N, K);
MMatr(N, M);
FMatr(N, F);
// Apply the FEM scale factors: K /= h and M *= h/6.
for (i=0;i<N;i++){
for (j=0;j<N;j++){
K[i][j]=cuCdivf(K[i][j],make_cuFloatComplex(h,0)) ;
M[i][j]=cuCmulf(M[i][j],make_cuFloatComplex(h/6,0));
}
}
// A = -w^2 * M + K.
for (i=0;i<N;i++){
for (j=0;j<N;j++){
A[i][j]=cuCaddf( cuCmulf( make_cuFloatComplex( -w*w,0), M[i][j]), K[i][j]);
}
}
// Extract the three bands of the tridiagonal A into a (sub), b (diagonal)
// and c.  NOTE(review): c[i-1]=A[i][i-1] reads the same band as a; A is
// symmetric here so the values equal the superdiagonal — confirm intent.
cuFloatComplex *a,*b,*c,*u;
a = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
b = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
c = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
u = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
for (i=1;i<(N-1);i++){
a[i+1]=A[i+1][i];
b[i]=A[i][i];
c[i-1]=A[i][i-1];
}
a[0]=make_cuFloatComplex(0,0);
a[1]=A[1][0];
b[0]=A[0][0];
b[N-1]=A[N-1][N-1];
c[N-2]=A[N-1][N-2];
c[N-1]=make_cuFloatComplex(0,0);
// Pad rows N..n-1 with the identity (diagonal 1, zero bands and RHS).
for (j=N;j<(n);j++) {
a[j]=make_cuFloatComplex(0,0);
b[j]=make_cuFloatComplex(1,0);
c[j]=make_cuFloatComplex(0,0);
F[j]=make_cuFloatComplex(0,0);
}
int l;
// solve() is currently a stub that returns 0; u is never filled in.
l=solve (n,a,b,c,F,u);
}
// Stub tridiagonal solver: accepts the three bands (top/mid/bot), the
// right-hand side b and the solution vector x, but performs no computation
// yet and always returns 0.  x is left untouched.
int solve(int n, cuFloatComplex *top, cuFloatComplex *mid, cuFloatComplex *bot, cuFloatComplex *b, cuFloatComplex *x){
return 0;
}
// MMatr assembles the unscaled tridiagonal mass matrix: each diagonal entry
// in the loop is 2*(t1 + t2) built from g*prm sampled at the i+1.5 and i+0.5
// half-cells, and the last diagonal entry is closed out separately.  The
// caller later scales the whole matrix by h/6.
// NOTE(review): both off-diagonals use t1 (the i+1.5 half-cell term), which
// keeps the matrix symmetric — confirm this matches the intended quadrature.
void MMatr(int n, cuFloatComplex** M){
int i;
cuFloatComplex t1,t2;
for (i=0;i<(n-1);i++) {
t1=cuCmulf(make_cuFloatComplex( gf(i+1.5),0), prm(i+1.5));
t2=cuCmulf( make_cuFloatComplex(gf(i+0.5),0) , prm(i+0.5));
M[i][i]=cuCmulf(make_cuFloatComplex(2,0), cuCaddf( t1 ,t2 ));
M[i+1][i]=t1;
M[i][i+1]=t1;
}
M[n-1][n-1]=cuCmulf(make_cuFloatComplex( 2*gf(n-0.5),0), prm(n-0.5)) ;
}
// prm returns the complex coefficient (w - i*sigm(x*h)) / w at grid
// position x (measured in units of the step h): real 1 with a negative
// imaginary damping part wherever the absorbing profile sigm is nonzero.
cuFloatComplex prm ( float x){
	cuFloatComplex minusI = make_cuFloatComplex(0, -1);
	cuFloatComplex damping = cuCmulf(minusI, sigm(x * h));
	cuFloatComplex omega = make_cuFloatComplex(w, 0);
	return cuCdivf(cuCaddf(omega, damping), omega);
}
// sigm: absorbing-layer damping profile.  Zero on (-inf, 1], growing
// quadratically as mu*(x-1)^2 beyond x = 1.  Returned as a purely real
// cuFloatComplex.
cuFloatComplex sigm(float x){
	if (!(x <= 1)) {
		float d = x - 1;
		return make_cuFloatComplex(mu * d * d, 0);
	}
	return make_cuFloatComplex(0, 0);
}
// gf: reciprocal of the squared wave speed, 1/cf(m*h)^2, at node position m
// (in units of the step h).
float gf(float m){
	float c = cf(m * h);
	return (1 / c) / c;
}
// cf: wave-speed profile along the line.  On (-inf, 1] it is the parabola
// 0.1 + 3.6*(x-0.5)^2 (minimum 0.1 at the midpoint); beyond x = 1 the speed
// is the constant 1.
float cf(float x){
	if (!(x <= 1))
		return 1;
	double d = x - 0.5;
	return 0.1 + 3.6 * d * d;
}
// KMatr assembles the unscaled tridiagonal stiffness matrix: each diagonal
// entry sums the reciprocal prm coefficients of the two adjacent half-cells,
// the symmetric off-diagonals carry -1/prm of the shared (i+1.5) half-cell,
// and the last diagonal entry is closed out separately.  The caller later
// scales the whole matrix by 1/h.
void KMatr(int n, cuFloatComplex ** M){
	cuFloatComplex one = make_cuFloatComplex(1, 0);
	cuFloatComplex minusOne = make_cuFloatComplex(-1, 0);
	for (int i = 0; i < n - 1; i++) {
		cuFloatComplex left = cuCdivf(one, prm(i + 0.5));
		cuFloatComplex right = cuCdivf(one, prm(i + 1.5));
		M[i][i] = cuCaddf(left, right);
		cuFloatComplex off = cuCdivf(minusOne, prm(i + 1.5));
		M[i + 1][i] = off;
		M[i][i + 1] = off;
	}
	M[n - 1][n - 1] = cuCdivf(one, prm(n - 0.5));
}
// FMatr fills the load vector: only the first entry is written here — a
// boundary source combining the mass-lumped w^2*h*g/6 term and the 1/h
// stiffness term, both weighted by prm at the first half-cell.  Entries
// beyond index 0 are left for the caller to initialize.
void FMatr(int n, cuFloatComplex * M){
M[0]=cuCaddf( cuCmulf(make_cuFloatComplex(w*w*h*gf(0.5)/6,0) , prm(0.5) ) , cuCmulf(make_cuFloatComplex(1/h,0),prm(0.5)));
} | 64ecaab7efdaa4d1158ad37c8c3c16474a2081d5.cu | #include <cuComplex.h>
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#define N 400 // size of the computational line
#define tmax 10 // required moment in time
float mu=100;
float const w = 5;
float h=(float) 2/N;// step along the line;
void KMatr(int n, cuFloatComplex** M);
cuFloatComplex prm (float x);
cuFloatComplex sigm(float x);
float gf(float m);
float cf(float x);
void MMatr(int n, cuFloatComplex** M);
void FMatr(int n, cuFloatComplex* M);
int solve(int n, cuFloatComplex *top, cuFloatComplex *mid, cuFloatComplex *bot, cuFloatComplex *b, cuFloatComplex *x);
/*
 * Assembles a 1-D frequency-domain (Helmholtz-type) finite-element system:
 * stiffness K, mass M and load F over N cells, forms A = -w^2*M + K,
 * extracts A's tridiagonal bands, pads the system with identity rows up to
 * n = 2^ceil(log2(N-1)) + 1, and hands it to solve() (currently a stub).
 * NOTE(review): malloc results are unchecked and nothing is freed — benign
 * for a one-shot main, but worth tightening.
 */
int main( int argc, char * argv [] ){
int k,i,n,j;
// Round N-1 up to the next power of two; n is the padded system size.
k=(int)pow(2,(int)ceil(log(N-1)/log(2)));
n=k+1;
cuFloatComplex **K, **M, *F, **A;
K = (cuFloatComplex**)malloc((n) * sizeof(cuFloatComplex*));
M = (cuFloatComplex**)malloc((n) * sizeof(cuFloatComplex*));
F = (cuFloatComplex*) malloc((n) * sizeof(cuFloatComplex)) ;
A = (cuFloatComplex**)malloc((n) * sizeof(cuFloatComplex*));
for (i=0;i<n;i++){
K[i]=(cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
M[i]=(cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
A[i]=(cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
}
// Assemble the unscaled matrices and the load vector.
KMatr(N, K);
MMatr(N, M);
FMatr(N, F);
// Apply the FEM scale factors: K /= h and M *= h/6.
for (i=0;i<N;i++){
for (j=0;j<N;j++){
K[i][j]=cuCdivf(K[i][j],make_cuFloatComplex(h,0)) ;
M[i][j]=cuCmulf(M[i][j],make_cuFloatComplex(h/6,0));
}
}
// A = -w^2 * M + K.
for (i=0;i<N;i++){
for (j=0;j<N;j++){
A[i][j]=cuCaddf( cuCmulf( make_cuFloatComplex( -w*w,0), M[i][j]), K[i][j]);
}
}
// Extract the three bands of the tridiagonal A into a (sub), b (diagonal)
// and c.  NOTE(review): c[i-1]=A[i][i-1] reads the same band as a; A is
// symmetric here so the values equal the superdiagonal — confirm intent.
cuFloatComplex *a,*b,*c,*u;
a = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
b = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
c = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
u = (cuFloatComplex*)malloc((n) * sizeof(cuFloatComplex));
for (i=1;i<(N-1);i++){
a[i+1]=A[i+1][i];
b[i]=A[i][i];
c[i-1]=A[i][i-1];
}
a[0]=make_cuFloatComplex(0,0);
a[1]=A[1][0];
b[0]=A[0][0];
b[N-1]=A[N-1][N-1];
c[N-2]=A[N-1][N-2];
c[N-1]=make_cuFloatComplex(0,0);
// Pad rows N..n-1 with the identity (diagonal 1, zero bands and RHS).
for (j=N;j<(n);j++) {
a[j]=make_cuFloatComplex(0,0);
b[j]=make_cuFloatComplex(1,0);
c[j]=make_cuFloatComplex(0,0);
F[j]=make_cuFloatComplex(0,0);
}
int l;
// solve() is currently a stub that returns 0; u is never filled in.
l=solve (n,a,b,c,F,u);
}
// Stub tridiagonal solver: accepts the three bands (top/mid/bot), the
// right-hand side b and the solution vector x, but performs no computation
// yet and always returns 0.  x is left untouched.
int solve(int n, cuFloatComplex *top, cuFloatComplex *mid, cuFloatComplex *bot, cuFloatComplex *b, cuFloatComplex *x){
return 0;
}
// MMatr assembles the unscaled tridiagonal mass matrix: each diagonal entry
// in the loop is 2*(t1 + t2) built from g*prm sampled at the i+1.5 and i+0.5
// half-cells, and the last diagonal entry is closed out separately.  The
// caller later scales the whole matrix by h/6.
// NOTE(review): both off-diagonals use t1 (the i+1.5 half-cell term), which
// keeps the matrix symmetric — confirm this matches the intended quadrature.
void MMatr(int n, cuFloatComplex** M){
int i;
cuFloatComplex t1,t2;
for (i=0;i<(n-1);i++) {
t1=cuCmulf(make_cuFloatComplex( gf(i+1.5),0), prm(i+1.5));
t2=cuCmulf( make_cuFloatComplex(gf(i+0.5),0) , prm(i+0.5));
M[i][i]=cuCmulf(make_cuFloatComplex(2,0), cuCaddf( t1 ,t2 ));
M[i+1][i]=t1;
M[i][i+1]=t1;
}
M[n-1][n-1]=cuCmulf(make_cuFloatComplex( 2*gf(n-0.5),0), prm(n-0.5)) ;
}
// prm returns the complex coefficient (w - i*sigm(x*h)) / w at grid position
// x (measured in units of the step h): real 1 plus a negative imaginary
// damping part wherever the absorbing profile sigm is nonzero.
cuFloatComplex prm ( float x){
return cuCdivf(cuCaddf( make_cuFloatComplex(w,0), cuCmulf(make_cuFloatComplex(0,-1), sigm(x*h) )) , make_cuFloatComplex(w,0));
}
// sigm: absorbing-layer damping profile.  Zero on (-inf, 1], growing
// quadratically as mu*(x-1)^2 beyond x = 1.  Returned as a purely real
// cuFloatComplex.
cuFloatComplex sigm(float x){
if (x <= 1)
return make_cuFloatComplex (0,0);
else
return make_cuFloatComplex( mu*(x-1)*(x-1),0);
}
// gf: reciprocal of the squared wave speed, 1/cf(m*h)^2, at node position m
// (in units of the step h).
float gf(float m){
return ( ( 1/cf(m*h) ) / cf(m*h) );
}
// cf: wave-speed profile along the line.  On (-inf, 1] it is the parabola
// 0.1 + 3.6*(x-0.5)^2 (minimum 0.1 at the midpoint); beyond x = 1 the speed
// is the constant 1.
float cf(float x){
	if (!(x <= 1))
		return 1;
	double d = x - 0.5;
	return 0.1 + 3.6 * d * d;
}
// KMatr assembles the unscaled tridiagonal stiffness matrix: each diagonal
// entry sums the reciprocal prm coefficients of the two adjacent half-cells,
// the symmetric off-diagonals carry -1/prm of the shared (i+1.5) half-cell,
// and the last diagonal entry is closed out separately.  The caller later
// scales the whole matrix by 1/h.
void KMatr(int n, cuFloatComplex ** M){
int i;
cuFloatComplex t1,t2;
for (i=0;i<(n-1);i++){
t1=cuCdivf(make_cuFloatComplex(1,0), prm(i+0.5));
t2=cuCdivf(make_cuFloatComplex(1,0), prm(i+1.5));
M[i][i]= cuCaddf(t1,t2);
M[i+1][i]=cuCdivf(make_cuFloatComplex(-1,0), prm(i+1.5));
M[i][i+1]=cuCdivf(make_cuFloatComplex(-1,0), prm(i+1.5));
}
M[n-1][n-1]=cuCdivf(make_cuFloatComplex(1,0), prm(n-0.5));
}
// FMatr fills the load vector: only the first entry is written here — a
// boundary source combining the mass-lumped w^2*h*g/6 term and the 1/h
// stiffness term, both weighted by prm at the first half-cell.  Entries
// beyond index 0 are left for the caller to initialize.
void FMatr(int n, cuFloatComplex * M){
M[0]=cuCaddf( cuCmulf(make_cuFloatComplex(w*w*h*gf(0.5)/6,0) , prm(0.5) ) , cuCmulf(make_cuFloatComplex(1/h,0),prm(0.5)));
} |
3d91c1167ea5a1ddec5b243dc416bda7c8fb09da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __APPLE__
# include <GL/glut.h>
#else
# include <GLUT/glut.h>
#endif
#include <iostream>
#include <fstream>
using namespace std;
#define BLOCK_SIZE 128
// Trivial smoke-test kernel: each thread zeroes its own element of `output`.
// The caller launches exactly one block of BLOCK_SIZE threads over a
// BLOCK_SIZE-element buffer, so threadIdx.x covers the allocation exactly
// and no bounds check is needed at that launch shape.
__global__ void simpleKernel(
float* output )
{
output[threadIdx.x] = 0;
}
cudaGraphicsResource* positionsVBO_CUDA;
// GLUT display callback.  Every call clears and swaps the frame; on the very
// first call only, it additionally runs a one-shot HIP smoke test (malloc a
// device buffer, launch simpleKernel over it, free, synchronize) and then
// terminates the process — exit status 0 when every HIP call succeeded,
// 1 otherwise, so the test result is the process exit code.
void display()
{
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
// Guard so the HIP portion runs exactly once.
static bool once = true;
if (!once)
return;
once = false;
unsigned N = BLOCK_SIZE;
unsigned size = N*sizeof(float);
float* g_data;
hipError_t mallocd = hipMalloc( &g_data, size );
dim3 block( BLOCK_SIZE );
dim3 grid( 1 );
hipLaunchKernelGGL(( simpleKernel), dim3(grid), dim3(block), 0, 0, g_data);
hipError_t freed = hipFree( g_data );
hipError_t sync = hipDeviceSynchronize();
bool all_success = (mallocd == hipSuccess)
&& (freed == hipSuccess)
&& (sync == hipSuccess);
// NOTE(review): `sync == 0` compares numerically rather than against
// hipSuccess like the other checks; equivalent since hipSuccess is 0,
// but inconsistent.
cout << "all_success = " << all_success << endl
<< "mallocd = " << (mallocd == hipSuccess) << endl
<< "freed = " << (freed == hipSuccess) << endl
<< "sync = " << (sync == 0) << endl;
bool any_failed = !all_success;
exit(any_failed);
}
// Sets up a minimal double-buffered GLUT window whose display callback runs
// the one-shot GPU smoke test; the process actually exits from display(),
// not from glutMainLoop().
int main(int argc, char *argv[])
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(500, 500);
glutInitWindowPosition(300, 200);
glutCreateWindow(__FILE__);
glutDisplayFunc( display );
glutMainLoop();
return 0;
}
| 3d91c1167ea5a1ddec5b243dc416bda7c8fb09da.cu | #ifndef __APPLE__
# include <GL/glut.h>
#else
# include <GLUT/glut.h>
#endif
#include <iostream>
#include <fstream>
using namespace std;
#define BLOCK_SIZE 128
// Trivial smoke-test kernel: each thread zeroes its own element of `output`.
// The caller launches exactly one block of BLOCK_SIZE threads over a
// BLOCK_SIZE-element buffer, so threadIdx.x covers the allocation exactly
// and no bounds check is needed at that launch shape.
__global__ void simpleKernel(
float* output )
{
output[threadIdx.x] = 0;
}
cudaGraphicsResource* positionsVBO_CUDA;
// GLUT display callback.  Every call clears and swaps the frame; on the very
// first call only, it additionally runs a one-shot CUDA smoke test (malloc a
// device buffer, launch simpleKernel over it, free, synchronize) and then
// terminates the process — exit status 0 when every CUDA call succeeded,
// 1 otherwise, so the test result is the process exit code.
void display()
{
    glClear(GL_COLOR_BUFFER_BIT);
    glutSwapBuffers();

    // Guard so the CUDA portion runs exactly once.
    static bool once = true;
    if (!once)
        return;
    once = false;

    unsigned N = BLOCK_SIZE;
    unsigned size = N*sizeof(float);

    float* g_data;
    cudaError mallocd = cudaMalloc( &g_data, size );

    dim3 block( BLOCK_SIZE );
    dim3 grid( 1 );
    simpleKernel<<< grid, block>>>(g_data);

    cudaError freed = cudaFree( g_data );
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent and likewise surfaces async kernel errors.
    cudaError sync = cudaDeviceSynchronize();

    bool all_success = (mallocd == cudaSuccess)
        && (freed == cudaSuccess)
        && (sync == cudaSuccess);
    // Compare every status against cudaSuccess consistently (the original
    // compared sync against the literal 0).
    cout << "all_success = " << all_success << endl
        << "mallocd = " << (mallocd == cudaSuccess) << endl
        << "freed = " << (freed == cudaSuccess) << endl
        << "sync = " << (sync == cudaSuccess) << endl;

    bool any_failed = !all_success;
    exit(any_failed);
}
// Sets up a minimal double-buffered GLUT window whose display callback runs
// the one-shot GPU smoke test; the process actually exits from display(),
// not from glutMainLoop().
int main(int argc, char *argv[])
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(500, 500);
glutInitWindowPosition(300, 200);
glutCreateWindow(__FILE__);
glutDisplayFunc( display );
glutMainLoop();
return 0;
}
|
cf96b692a6787666b9588d6cd814a46f973bec42.hip | // !!! This is a file automatically generated by hipify!!!
//
// Gpu.cu
// This file contains the definition of the Gpu class which provides the GPU
// API for the GPU fuctions requred to build the Kd tree.
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSEARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/********************************************************************************
/* DBUG defines
/********************************************************************************/
//#define FAKE_TWO // runs the Multi-GPU code on a single GPU
#include <limits>
#include <hip/hip_runtime.h>
#include <omp.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // Helper for shared that are common to CUDA Samples
#include "Gpu.h"
#include "mergeSort_common.h"
#include "removeDups_common.h"
#include "buildKdTree_common.h"
sint Gpu::numGPUs = 0;
Gpu* Gpu::gpus[MAX_GPUS] = {NULL};
refIdx_t Gpu::firstNode;
KdNode Gpu::gpu1stNode;
/*
* cuSuperKeyCompare function performs the compare between coordinates used in the sorting and partitioning
* functions. It starts by subtracting the primary coordinate or the pth coordinate and proceeds through each
* of the coordinates until it finds the non-zero difference. That difference is returned as the compare result.
* Inputs
* a[] Pointer to the first coordinate
* b[] Pointer to the second coordinate
* p index to the primary coordinate to compare. p must be less than dim
* dim Number of dimensions the coordinate has.
*
* Returns a long that is positive if a>b, 0 if equal an negative is a<b
*/
__device__ long cuSuperKeyCompare(const KdCoord a[], const KdCoord b[], const sint p, const sint dim)
{
	// Walk the coordinates starting at dimension p and wrapping past dim;
	// the first nonzero component difference decides the ordering
	// (super-key comparison).  Returns >0 if a>b, 0 if equal, <0 if a<b.
	KdCoord diff = 0;
	for (sint i = 0; i < dim && diff == 0; i++) {
		sint r = p + i;
		if (r >= dim) r -= dim;
		diff = a[r] - b[r];
	}
	return diff;
}
/*
* cuSuperKeyCompareFirstDim is a GPU function that performs the same function as cuSuperKeyCompare.
* But in the case where the calling code has pre-fetched the first dimension or component, the this takes
* the A and B components as L values and only access the array values if the first components happen to be equal.
* Inputs
* ap first compare component l value
* bp first compare component l value
* *a a coordinates
* *b b coordinates
* p index of the first
* dim number of dimensions the coordinates have
*
* Returns a long that is positive if a>b, 0 if equal an negative is a<b
*/
__device__ long cuSuperKeyCompareFirstDim(const KdCoord ap, const KdCoord bp, const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
	// The caller has already fetched the primary components ap and bp, so
	// the coordinate arrays are only touched when those happen to tie;
	// the remaining dimensions are then compared wrapping past dim.
	KdCoord diff = ap - bp;
	sint i = 1;
	while (diff == 0 && i < dim) {
		sint r = p + i;
		if (r >= dim) r -= dim;
		diff = a[r] - b[r];
		i++;
	}
	return diff;
}
/*
* cuInitializeKnodesArray is a GPU kernel that initializes the array of KdNodes that will eventually
* be the kdNode tree. Initialization include copying the coordinates from the coordinates array and
* initing the child node indices to -1 which is the terminal node indicator
* Inputs
* kdNodes Pointer to the array of uninitialized kd nodes.
* coordinates Pointer the the array coordinates.
* numTuples number of coordinates and kd nodes.
* dim dimension of the coordinates
* numTotalThreads number of threads being used to do the initing
*/
__global__ void cuInitializeKdNodesArray(KdNode kdNodes[], KdCoord coordinates[], const sint numTuples, const sint dim, sint numTotalThreads){
	// Strided loop over all nodes: each node is pointed at its own tuple
	// index and both children are marked terminal (-1).  The `coordinates`
	// and `dim` parameters are accepted but unused here.
	const sint start = threadIdx.x + blockIdx.x * blockDim.x;
	for (sint node = start; node < numTuples; node += numTotalThreads) {
		kdNodes[node].tuple = node;
		kdNodes[node].ltChild = -1;
		kdNodes[node].gtChild = -1;
	}
}
/*
* cuCupyCoordinate is a GPU kernel that copies the pth coordinate from the coordinate array into a 1 dimensional array.
* The copy is done suche that the value is at the same index as the reference in the d_ref array.
* Inputs
* coord[] pointer to the coordinates array.
* d_sval[] pointer to the value array that the data is to be copied too.
* d_ref[] pointer to reference array that it is should match
* p sint indicating the coordinate to copy
* dim sint indicating dimensions
* numTuples sint indicating number of values to copy
*/
__global__ void cuCopyCoordinate(const KdCoord coord[], KdCoord d_sval[], const refIdx_t d_ref[], const sint p, const sint dim, const sint numTuples){
	// Gather pass: for each reference slot j, fetch the p-th component of
	// the tuple d_ref[j] points at, so values line up index-for-index with
	// the reference array.
	const sint first = threadIdx.x + blockIdx.x * blockDim.x;
	const sint step = blockDim.x * gridDim.x;
	for (sint j = first; j < numTuples; j += step) {
		const refIdx_t tuple = d_ref[j];
		d_sval[j] = coord[tuple * dim + p];
	}
}
/*
* initializeKdNodesArrayGPU is a Gpu class method that allocated the KdNode array in the GPU and calls the cuInitilizeKdNodesArray.
* It also copies the coordinates data over to the gpu if it's not there already.
* Inputs
* coordinates[] Pointer to the coordinate data the to be put in the kdTree. Can be null if the GPU already has the coordinates
* numTuples sint indicating number of coordinates and the number of kdNodes to be created
* dim number of dimensions.
*/
void Gpu::initializeKdNodesArrayGPU(const KdCoord coordinates[], const sint numTuples, const sint dim){
// Make this whole function critical so that memory allocations will not fail.
#pragma omp critical (launchLock)
{
setDevice();
// First allocate memory for the coordinate array and copy it to the device
// NOTE(review): the sizes here use sizeof(int) while the sentinel copy below
// uses sizeof(KdCoord) — correct only if KdCoord is int-sized; confirm.
if (d_coord == NULL) {
checkCudaErrors(hipMalloc((void **) &d_coord, (numTuples+1)*sizeof(int)*dim)); // Allocate an extra for max coord
checkCudaErrors(hipMemcpyAsync(d_coord, coordinates, numTuples*sizeof(int)*dim, hipMemcpyHostToDevice, stream));
} else if (d_coord != NULL) {
// Re-initialization is treated as a fatal programming error.
cout << "initializeKdNodesArrayGPU Error: coordinate array already allocated" << endl;
exit(1);
}
// Add an extra tuple at the end with all max values.
// NOTE(review): tmp is a stack array handed to hipMemcpyAsync with no
// stream sync before it goes out of scope — pageable-host async copies are
// typically staged, but this is worth confirming.
KdCoord tmp[dim];
for (int i=0; i<dim; i++){
tmp[i] = std::numeric_limits<KdCoord>::max();
}
checkCudaErrors(hipMemcpyAsync(d_coord+numTuples*dim, tmp, sizeof(KdCoord)*dim, hipMemcpyHostToDevice, stream));
// Then allocate the kdNode Array
if(d_kdNodes == NULL) {
checkCudaErrors(hipMalloc((void **) &d_kdNodes, numTuples*sizeof(KdNode)));
} else {
cout << "InitialzeKdNode Error: kdNodes array already allocated" << endl;
exit(1);
}
// Call the init routine
hipLaunchKernelGGL(( cuInitializeKdNodesArray), dim3(numBlocks), dim3(numThreads), 0, stream, d_kdNodes, d_coord, numTuples, dim, numThreads*numBlocks);
checkCudaErrors(hipGetLastError());
}
}
/*
* initializeKdNodesArray is a Gpu class static method that allocated amd initializes the KdNode array
* in all of the GPUs. It also copies a portion of coordinates data over to each gpu.
* Inputs
* kdNodes[] pointer to a cpu side kdNodes array. If not null, the GPU side kdNodes array is copied to it.
* Normally this should be null
* coordinates[] Pointer to the CPU coordinate data.
* numTuples sint indicating number of coordinates and the number of kdNodes to be created
* dim number of dimensions.
*/
// Static entry point: splits the coordinate array evenly across all GPUs and
// initializes each GPU's kdNode/coordinate arrays in parallel (one OpenMP
// thread per GPU).
// NOTE(review): numTuples/numGPUs is integer division, so when numTuples is
// not a multiple of numGPUs the remainder tuples are silently dropped —
// TODO confirm callers guarantee divisibility.
void Gpu::initializeKdNodesArray(KdCoord coordinates[], const sint numTuples, const sint dim){
if (coordinates == NULL) {
cout << "initializeKdNodesArray Error: Expecting coordinates data to send to GPU" << endl;
exit(1);
}
#pragma omp parallel for
for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) {
// Each GPU gets a contiguous slice of numTuples/numGPUs tuples.
KdCoord* ct = coordinates + gpuCnt * dim * numTuples/numGPUs;
gpus[gpuCnt]->initializeKdNodesArrayGPU(ct, numTuples/numGPUs, dim);
}
}
/*
* cuFillMem is a Gpu class method that fills a portion of memory with a constant value.
* Inputs
* d_pntr pointer to where the fill should start
* val value to fill with
* numTuples number of reference array entries
*/
__global__ void cuFillMem(uint d_pntr[], uint val, int num){
	// Strided fill: each thread writes `val` into every stride-th word of
	// the `num`-word buffer.
	const sint tid = threadIdx.x + blockIdx.x * blockDim.x;
	const sint step = blockDim.x * gridDim.x;
	for (sint j = tid; j < num; j += step) {
		d_pntr[j] = val;
	}
}
/*
* fillMemGPU is a Gpu class method that fills a portion of memory with a constant value.
* It calls the cuFillMem kernel to do so.
* Inputs
* d_pntr pointer to where the fill should start
* val value to fill with
* numTuples number of reference array entries
*/
/*
 * fillMemGPU is a Gpu class method that fills a device buffer with a
 * constant value by launching the cuFillMem kernel on this object's stream.
 * Inputs
 *  d_pntr  device pointer to the start of the region to fill (must be non-null)
 *  val     value to fill with
 *  num     number of words to fill
 */
void Gpu::fillMemGPU(uint* d_pntr, const uint val, const uint num) {
	setDevice();
	// Launch only when the caller handed in a valid device pointer.  The
	// original test was inverted: it launched the kernel when d_pntr was
	// NULL and reported "device pointer is null" when it was not.
	if (d_pntr != NULL) {
		hipLaunchKernelGGL(( cuFillMem), dim3(numBlocks),dim3(numThreads), 0, stream, d_pntr, val, num);
		checkCudaErrors(hipGetLastError());
	} else {
		cout << "fillMemGPU Error: device pointer is null" << endl;
		exit(1);
	}
}
/*
 * fillMemGPU (sint overload) fills a device buffer of signed words with a
 * constant value by launching the cuFillMem kernel on this object's stream.
 * Inputs
 *  d_pntr  device pointer to the start of the region to fill (must be non-null)
 *  val     value to fill with
 *  num     number of words to fill
 */
void Gpu::fillMemGPU(sint* d_pntr, const sint val, const uint num) {
	setDevice();
	// Launch only when the caller handed in a valid device pointer.  The
	// original test was inverted: it launched the kernel when d_pntr was
	// NULL and reported "device pointer is null" when it was not.
	if (d_pntr != NULL) {
		hipLaunchKernelGGL(( cuFillMem), dim3(numBlocks),dim3(numThreads), 0, stream, (uint*)d_pntr, val, num);
		checkCudaErrors(hipGetLastError());
	} else {
		cout << "fillMemGPU Error: device pointer is null" << endl;
		exit(1);
	}
}
/*
* cuInitializeRefernece is a GPU kernel that initializes the reference arrays used in sorting and partitioning functions
* Each array value is set to its index.
* Inputs
* reference[] Pointer to the reference array to be initialized
* numTuples Integer indicating the number of elements to init.
*/
__global__ void cuInitializeReference(refIdx_t reference[], sint numTuples){
	// Identity fill: reference[j] = j for every element, strided across
	// the whole grid.
	const sint tid = threadIdx.x + blockIdx.x * blockDim.x;
	const sint step = blockDim.x * gridDim.x;
	for (sint j = tid; j < numTuples; j += step) {
		reference[j] = (refIdx_t)j;
	}
}
/*
* initializeReferenceGPU is a Gpu class method that allocates the reference arrays in the GPU and then calls
* cuInitializeReference kernel to init the arrays. dim+2 arrays are allocated but on dim arrays are initialized
* Inputs
* numTuples number of reference array entries
* dim number of dimensions
* Outputs
* references pointer to an array of pointers of CPU side reference arrays. If not null
the initialize arrays are copied back. Normally this should be null.
*/
void Gpu::initializeReferenceGPU(const sint numTuples, const sint p, const sint dim) {
setDevice();
// Lazily allocate all dim+2 reference arrays; only ones still NULL are
// touched, so repeated calls are safe.
for(sint i = 0; i < dim+2; i++) { // Take care of any null pointer
if (d_references[i] == NULL) {
// Allocate the space in the GPU
checkCudaErrors(hipMalloc((void **) &d_references[i], numTuples*sizeof(uint)));
}
}
// Initialize only the primary array d_references[p] to the identity
// permutation 0..numTuples-1; the other arrays stay uninitialized.
#pragma omp critical (launchLock)
{
setDevice();
hipLaunchKernelGGL(( cuInitializeReference), dim3(numBlocks),dim3(numThreads), 0, stream, d_references[p], numTuples);
checkCudaErrors(hipGetLastError());
}
}
/*
* getReferenceFromGPU is a Gpu class method that copies one of the gpu reference arrays
* bach to the CPU.
* Inputs
* numTuples number of reference array entries
* p indicates to copy the data from d_reference[p] array.
* Outputs
* reference pointer to a reference array on the CPU side.
*/
/*
 * Copies the pth device reference array back to the host buffer, if one was supplied.
 */
void Gpu::getReferenceFromGPU(refIdx_t* reference, const sint p, uint numTuples){
	setDevice();
	// No destination buffer means there is nothing to do.
	if (reference == NULL) return;
	// Stream-ordered device-to-host copy of the pth reference array.
	checkCudaErrors(hipMemcpyAsync(reference, d_references[p], numTuples*sizeof(refIdx_t), hipMemcpyDeviceToHost, stream));
}
/*
* getCoordinatesFromGPU is a Gpu class method that copies coordinate arrays
 * back to the CPU.
* Inputs
* numTuples number of tuples in the coordinate
* dim number of dimensions in a tuple.
* Outputs
* coord pointer to a coordinate array on the CPU side.
*/
/*
 * Copies the device coordinate array back to the host buffer, if one was supplied.
 */
void Gpu::getCoordinatesFromGPU(KdCoord* coord, const uint numTuples, const sint dim){
	setDevice();
	// No destination buffer means there is nothing to do.
	if (coord == NULL) return;
	// Stream-ordered device-to-host copy of all dim*numTuples coordinates.
	checkCudaErrors(hipMemcpyAsync(coord, d_coord, dim *numTuples*sizeof(KdCoord), hipMemcpyDeviceToHost, stream));
}
/*
* mergeSortRangeGPU is a Gpu class method that performs the sort on one dimension and in one GPU.
* Inputs
* start integer offset into the references[from] array where of where to start the sort
* num number of elements to sort
* from index of the references array to sort from
* to index of the references array to sort to
* p primary coordinate on which the sort will occur
* dim number of dimensions
* Output
*/
void Gpu::mergeSortRangeGPU(const sint start, const sint num, const sint from, const sint to, const sint p, const sint dim){
	setDevice();
	// First check that memory on the GPU has been allocated
	if (d_coord == NULL || d_references[from] == NULL || d_references[to] == NULL) {
		cout << "mergeSortRangeGPU Error: coordinates or references for are null" << endl;
		exit(1);
	}
	// Lazily create the value (sort-key) arrays that shadow the reference arrays.
	// The launch lock serializes GPU work issued by the OMP threads that drive
	// the individual devices.
	#pragma omp critical (launchLock)
	{
		setDevice();
		if (d_values[from] == NULL){
			checkCudaErrors(hipMalloc((void **) &d_values[from], num*sizeof(KdCoord)));
			// Gather the coordinate of interest into the value array so the sort can
			// compare keys without re-reading whole coordinate tuples.
			// NOTE(review): `from` is passed as the coordinate index here; all current
			// callers pass from == p, but that equivalence is assumed — confirm.
			hipLaunchKernelGGL(( cuCopyCoordinate), dim3(numBlocks),dim3(numThreads), 0, stream, d_coord+start*dim, d_values[from], d_references[from], from, dim, num);
		}
		if (d_values[to] == NULL)
			checkCudaErrors(hipMalloc((void **) &d_values[to], num*sizeof(KdCoord)));
		checkCudaErrors(hipGetLastError());
	}
	// Sort the [start, start+num) slice of references[from] into references[to]
	// using coordinate p as the primary sort key.
	mergeSortSmpl(d_coord, // coordinate array
			d_values[to], d_references[to]+start, // output arrays
			d_iVal, d_iRef, // Intermediate arrays
			d_values[from], d_references[from]+start, // Input arrays
			num, 1, p, dim // sizes and directions
	);
}
/*
* removeDuplicatesGPU is a Gpu class method that performs the duplicates removal for one of
* the dimensions of the coordinates.
* Inputs
* start integer offset into the references[from] array where of where to start the removal
* num number of elements to check for removal
* from index of the references array to remove from
* to index of the references arry to put the results
* p primary coordinate on which the removal will occur
* dim number of dimensions
* otherGpu pointer to another GPU. This function will compare the last tuple in that
* GPU to the first tuple in this one.
* otherNum number of tuples in the other GPU.
* Output
*/
/*
 * Removes duplicate tuples (as seen along primary coordinate p) from the
 * [start, start+num) slice of references[from], writing the compacted result
 * to references[to]. When otherGpu is supplied, the last tuple held by that
 * GPU is compared with this GPU's first tuple so duplicates that straddle the
 * GPU boundary are also removed.
 * Returns the number of references remaining after removal.
 */
sint Gpu::removeDuplicatesGPU(const sint start, const sint num, const sint from, const sint to,
		const sint p, const sint dim, Gpu* otherGpu, sint otherNum){
	// Make this GPU current before touching its data. Every other per-GPU
	// wrapper in this class does this first; it matters when several host
	// threads each drive a different device.
	setDevice();
	if (d_values[from] == NULL || d_values[to] == NULL) {
		cout << "values[from] or values[to] pointer is NULL" << endl;
		exit(1);
	}
	if (d_references[from] == NULL || d_references[to] == NULL) {
		cout << "references[from] or references[to] pointer is NULL" << endl;
		exit(1);
	}
	// get the pointers to the data in the other GPU if required. This is to remove duplicates across GPU boundaries.
	refIdx_t* otherRef = (otherGpu == NULL) ? NULL : otherGpu->d_references[from]+otherNum-1;
	KdCoord* otherCoord = (otherGpu == NULL) ? NULL : otherGpu->d_coord;
	sint end = removeDups(d_coord+start*dim, // Coordinate array
			d_values[to], d_references[to]+start, // Output arrays
			d_iVal, d_iRef, // Intermediate arrays
			d_values[from], d_references[from]+start, // Input arrays
			otherCoord, otherRef, // Pointers to data in the other GPU
			p, dim, num, // sizes
			numBlocks*numThreads // threads
	);
	return end;
}
/*
* copyRefValGPU is a Gpu class method that copies the contents of references[from] to the
* references[to] array. Likewise for the values arrays.
* Note that if the references[to] or the values[to] array pointer is null,
* the arrays will be allocated
* Inputs
* start integer offset into the references[from] array where of where to start the copy
* num number of elements to check for removal
* from index of the references array to remove from
* to index of the references array to put the results
* Output
*/
/*
 * Copies num entries of both the values and references arrays from index
 * `from` to index `to`, starting at offset `start`. Destination arrays are
 * allocated on demand; missing source arrays are a fatal error.
 */
void Gpu::copyRefValGPU(sint start, sint num, sint from, sint to) {
	setDevice();
	// The source arrays must already be resident on the device.
	if (d_values[from] == NULL) {
		cout << "copyRefValGPU Error: values[from] pointer is NULL" << endl;
		exit(1);
	}
	if (d_references[from] == NULL) {
		cout << "copyRefValGPU Error: references[from] pointer is NULL" << endl;
		exit(1);
	}
	// Create the destination arrays on demand.
	if (d_values[to] == NULL)
		checkCudaErrors(hipMalloc((void **) &d_values[to], num*sizeof(KdCoord)));
	if (d_references[to] == NULL)
		checkCudaErrors(hipMalloc((void **) &d_references[to], num*sizeof(refIdx_t)));
	// Perform the device-side copy of both arrays.
	KdCoord*  dstVal = d_values[to];
	KdCoord*  srcVal = d_values[from];
	refIdx_t* dstRef = d_references[to]+start;
	refIdx_t* srcRef = d_references[from]+start;
	copyRefVal(dstVal, dstRef, srcVal, srcRef, num, numBlocks*numThreads);
}
/*
* copyRefGPU is a Gpu class method that copies the contents of references[from] to the
* references[to] array.
* Note that if the references[to] array pointer is null, the arrays will be allocated
* Inputs
* start integer offset into the references[from] array where of where to start the removal
* num number of elements to copy
* from index of the references array to copy from
* to index of the references array to put the results
* Output
*/
/*
 * Copies num entries of the references array from index `from` to index `to`,
 * starting at offset `start`. The destination array is allocated on demand;
 * a missing source array is a fatal error.
 */
void Gpu::copyRefGPU(sint start, sint num, sint from, sint to) {
	this->setDevice();
	// The source reference array must already exist on the device.
	if (d_references[from] == NULL) {
		cout << "copyRefGPU Error: references[from] pointer is NULL" << endl;
		exit(1);
	}
	// Create the destination array on demand.
	if (d_references[to] == NULL)
		checkCudaErrors(hipMalloc((void **) &d_references[to], num*sizeof(refIdx_t)));
	// Perform the device-side copy.
	refIdx_t* dst = d_references[to]+start;
	refIdx_t* src = d_references[from]+start;
	copyRef(dst, src, num, numBlocks*numThreads);
}
/*
* balancedSwapGPU is a Gpu class method that is a wrapper around the balancedSwap function in mergSort.cu
* This function uses just one of the GPUs to swap coordinate data between GPUs such that all of the tuples
* in the is GPU is less than the tuples in the other GPU. It is a component of the multi GPU sort function.
* Inputs
* start integer offset into the references[from] array where of where to start the removal
* num number of elements to check for swap
* from index of the references array to swap from. Results remain in the reference[from]
* p primary coordinate on which the swap compare will occur
* dim number of dimensions
* otherGPU pointer to another GPU.
* Return
* pivot the index into the reference array on the other GPU below which were swapped with this GPU.
*/
/*
 * Exchanges tuples with otherGpu so that every tuple on this GPU sorts below
 * every tuple on the other GPU (compared on coordinate p). Returns the pivot
 * index on the other GPU below which tuples were swapped.
 */
sint Gpu::balancedSwapGPU(sint start, sint num, sint from, sint p, sint dim, Gpu* otherGpu){
	setDevice();
	// Aliases for this GPU's data (low half) and the partner's data (high half).
	KdCoord*  loCoord = d_coord;
	KdCoord*  loVal   = d_values[from];
	refIdx_t* loRef   = d_references[from]+start;
	KdCoord*  hiCoord = otherGpu->d_coord;
	KdCoord*  hiVal   = otherGpu->d_values[from];
	refIdx_t* hiRef   = otherGpu->d_references[from]+start;
	return balancedSwap(loCoord, loVal, loRef,
			hiCoord, hiVal, hiRef,
			1, p, dim, num, numBlocks*numThreads);
}
/*
* swapMergeGPU is a Gpu class method that is a wrapper around the mergeSwap function in mergSort.cu
* After the BalancedSwap function exchanges the coordinates between the two GPU, there remains
* two independently sorted arrays in each GPU. This function merges those into a single sorted array.
* Inputs
* start integer offset into the references[from] array where of where start of the lower sorted data
* num number of elements to check for swap
* from index of the references array to merge from.
* to index of the references array to merge to.
* mergePoint index of the start of the upper sorted data.
* p primary coordinate on which the merge compare will occur
* dim number of dimensions
* Output
*/
/*
 * Merges the two independently sorted runs left behind by balancedSwapGPU
 * (split at mergePoint) into one sorted run, reading from the `from` arrays
 * and writing the merged result to the `to` arrays, compared on coordinate p.
 */
void Gpu::swapMergeGPU(sint start, sint num, sint from, sint to, sint mergePoint, const sint p, const sint dim){
	setDevice();
	mergeSwap(d_coord, d_values[from], d_references[from],
			d_values[to], d_references[to],
			mergePoint, p, dim, num, numBlocks*numThreads);
}
/*
* mergeSort is a static Gpu class method that performs the sort and the duplicates removal for all of
* the dimensions across all GPUs.
* If there is 1 gpu, this function does a simple loop through the dimensions, first sorting then
* removing duplicated.
 * If there are 2 gpus, this function does a sort of the first dimension on the coordinates in
 * each GPU; the gpus each have half of the coordinates. Then it swaps the necessary tuples
 * between GPUs such that all of the tuples in gpu 0 are less than all of the tuples in gpu 1. Finally
 * it copies the references from dimension 1 into each reference array of the other dimensions,
* sorts those arrays and does a duplicate removal.
* Inputs
* numTuples number of input coordinates and references
* dim number of dimensions
* Output
 * end[]  pointer to an array containing the number of references after duplicate removal.
* This method writes to the pth entry in that array.
*/
void Gpu::mergeSort(sint end[], const sint numTuples, const sint dim){
	// Per-GPU, per-dimension reference counts after duplicate removal.
	int gpuEnds[numGPUs][dim];
	// Number of tuples handled by each GPU.
	sint NperG = numTuples/numGPUs;
	// Sentinel reference used to pad the slots vacated by removed duplicates.
	refIdx_t maxRef = NperG;
	for (int i=0; i<numGPUs; i++) gpus[i]->initMergeSortSmpl(numTuples);
	if (numGPUs > 1) {
		#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) {
			// Initialize the reference array for the first coordinate
			gpus[gpuCnt]->initializeReferenceGPU(NperG, 0, dim);
			// And sort that array
			gpus[gpuCnt]->mergeSortRangeGPU(0, NperG, 0, dim, 0, dim);
		}
		sync();
		// Then swap data between the GPUs so all the data in gpu0 is less than
		// all the data in gpu1; performed only on gpu0.
		sint pivot = gpus[0]->balancedSwapGPU(0, NperG, dim, 0, dim, gpus[1]);
		cout << "Pivot = " << pivot << endl;
		sync();
		// Now merge the swapped data into a single sorted data set
		#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			gpus[gpuCnt]->swapMergeGPU(0, NperG, dim, 0, gpuCnt==0 ? NperG-pivot: pivot, 0, dim);
		}
		sync(); // wait
		// And remove dups in all GPUs; each GPU also checks against its lower
		// neighbor (getGPU(gpuCnt-1)) for a duplicate spanning the boundary.
		#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			gpus[gpuCnt]->num = gpuEnds[gpuCnt][0] =
					gpus[gpuCnt]->removeDuplicatesGPU(0, NperG, 0, dim, 0, dim, getGPU(gpuCnt-1), NperG);
			// If duplicates were removed, fill the vacated slots with a reference
			// to the max tuple value so the array length stays NperG.
			if (gpus[gpuCnt]->num != NperG)
				gpus[gpuCnt]->fillMemGPU(gpus[gpuCnt]->d_references[dim]+gpus[gpuCnt]->num, NperG, NperG-gpus[gpuCnt]->num);
			gpus[gpuCnt]->copyRefValGPU(0, NperG, dim, 0);
		}
		sync(); // wait
		// Get the median of the entire data set which is the last value on the lower GPU
		gpus[0]->setDevice();
		checkCudaErrors(hipMemcpyAsync(&firstNode, gpus[0]->d_references[0]+gpus[0]->num-1, sizeof(refIdx_t), hipMemcpyDeviceToHost, gpus[0]->stream));
		// Then replace that one with an index of the max node so that the sort size is the same but will remain in the same place.
		checkCudaErrors(hipMemcpyAsync(gpus[0]->d_references[0]+gpus[0]->num-1, &maxRef, sizeof(refIdx_t), hipMemcpyHostToDevice, gpus[0]->stream));
		// And subtract 1 from number of elements in gpu 0;
		gpus[0]->num -= 1;
		// On all gpus copy the sorted p=0 array to the other arrays and sort,
		#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			for (int p=1; p<dim; p++) {
				// Copy the references from p=0 to p=1..dim, sort and remove duplicates
				gpus[gpuCnt]->copyRefGPU(0, NperG, 0, p);
				gpus[gpuCnt]->mergeSortRangeGPU(0, NperG, p, dim, p, dim);
				gpuEnds[gpuCnt][p] = gpus[gpuCnt]->removeDuplicatesGPU(0, NperG, dim, p, p, dim);
			}
		}
		// Combine the per-GPU counts; a negative count anywhere flags an error.
		for (int i=0; i<dim; i++) {
			end[i] = gpuEnds[0][i]<0 || gpuEnds[1][i]<0 ? -1 : gpuEnds[0][i] + gpuEnds[1][i];
		}
	} else {
		// Single-GPU path: sort and deduplicate each dimension in turn.
		gpus[0]->setDevice();
		for (int p=0; p<dim; p++) {
			gpus[0]->initializeReferenceGPU(NperG, p, dim);
			gpus[0]->mergeSortRangeGPU(0, NperG, p, dim, p, dim);
			end[p] = gpus[0]->removeDuplicatesGPU(0, NperG, dim, p, p, dim);
		}
		gpus[0]->num = end[0];
	}
	sync(); // Make sure all GPUs are done before freeing memory.
	// Free the value arrays because they are not needed any more.
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
		gpus[gpuCnt]->closeMergeSortSmpl();
		for (int p=0; p<=dim; p++) {
			checkCudaErrors(hipFree(gpus[gpuCnt]->d_values[p]));
		}
	}
}
/*
* buildKdTreeGPU is a Gpu class method that prepares the data for the gpu side partitioning and build tree
* and then calls those functions, once for each level. Note that all of the data should already be in the GPU
* after the mergeSortGPU function has been called. If any of those gpu data pointers are null, this function will
* either create them or error out.
* Inputs
* numTuples number of coordinates to build the tree on. all duplicates should be removed
* startP the first dimension to start the partitioning on.
* dim dimension of the coordinates
*/
refIdx_t Gpu::buildKdTreeGPU(const sint numTuples, const int startP, const sint dim) {
	setDevice();
	// Check to see if the GPU already has the references arrays and error out if not.
	if (d_references == NULL) {
		cout << "buildKdTree Error: device does not have the reference arrays" << endl;
		exit(1);
	} else {
		for (sint i = 0; i < dim; i++)
			if (d_references[i] == NULL) {
				cout << "buildKdTree Error: device does not have the reference array " << i << endl;
				exit(1);
			}
	}
	if (d_references[dim] == NULL) { // If the last (scratch) array is not there, create it
		checkCudaErrors(hipMalloc((void **) &d_references[dim], (numTuples)*sizeof(int)));
	}
	// The tree has floor(log2(numTuples)) levels; partition one level per
	// iteration, cycling the partitioning axis through the dims from startP.
	const sint tuplesDepth = int(floor(log2(float(numTuples))));
	for (sint i=0; i<tuplesDepth-1; i++) {
		sint p = (i + startP) % dim;
		partitionDim( d_kdNodes, d_coord, d_references, p, dim, numTuples, i, numBlocks*numThreads);
	}
	// The bottom level is handled by a dedicated partition function.
	sint p = (tuplesDepth + startP - 1) % dim;
	partitionDimLast( d_kdNodes, d_coord, d_references, p, dim, numTuples, tuplesDepth-1, numBlocks*numThreads);
	// NOTE(review): rootNode is a class member presumably filled in by the
	// partition calls above — confirm against partitionDim's implementation.
	return rootNode;
}
/*
* buildKdTree is a static Gpu class method that starts the partitioning in one or two GPUs by calling buildTreeGPU
* Note that all of the data should already be in the GPU after the mergeSort function has been called.
* Inputs
* numTuples number of coordinates to build the tree on. all duplicates should be removed
* dim dimension of the coordinates
*
* Return index of the root node in the KdNodes array,.
*/
refIdx_t Gpu::buildKdTree(KdNode kdNodes[], const sint numTuples, const sint dim) {
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) gpus[gpuCnt]->initBuildKdTree();
	// With 2 GPUs the root-level split on dimension 0 was already produced by
	// the multi-GPU sort, so each GPU starts partitioning at dimension 1.
	int startP = numGPUs == 1 ? 0 : 1;
	#pragma omp parallel for
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
		gpus[gpuCnt]->buildKdTreeGPU(gpus[gpuCnt]->num, startP, dim);
	}
	if (numGPUs==2) {
		// TODO read back the fist node data HERE
		// Stitch the two per-GPU subtrees together under the host-side first node.
		gpu1stNode.ltChild = gpus[0]->rootNode;
		gpu1stNode.gtChild = gpus[1]->rootNode;
		gpu1stNode.tuple = firstNode;
	} else {
		firstNode = gpus[0]->rootNode;
	}
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) gpus[gpuCnt]->closeBuildKdTree();
	return firstNode;
}
/*
* cuVerifyKdTree is a GPU kernel that is used to verify that one depth level the kdTree is correct. The refs array hold the indices of the
* KdNodes at a particular level of the tree. For each entry in refs, this kernel will examine the child nodes to make sure that
* the coordinate of the ltChild is less than coordinate of self and the coordinate of the gtChild is greater than itself. If this test
* fails, the negated index of self is written to the nextRefs array. In addition the global d_verifyKTdreeError be written with an
* error code so that the cpu function only has to check the one variable for pass or fail.
* At the first call to this kernel for level 0, refs contain a single index which is the root of the tree. If no error is found, the indices
* of the ltChild and gtChild are written to the nextRefs array. The nextRefs array will be used as the refs array on the next call to this
* kernel. The nextRefs array must be double the size of the refs array.
 * Finally each block of threads of the kernel adds the number of good kdNodes it found to the g_sum array.
* Inputs
* kdNodes[] Pointer to the KdNodes array
* coord[] Pointer to the coordinates array
* refs[] pointer to the array of indices of the KdNodes to be tested.
* num sint indicating the number of indices in the refs array to be tested
* p sint indicating the primary coordinate for this level
* dim sint indicating the number fo dimensions
* Outputs
* nextRefs Pointer to the array where the indices of the child nodes will be stored
* g_sum pointer to the array where the count of good nodes found by each block will be held
*/
// TODO use a negative value in g_sums[0] to indicate an error instead so a global is not needed.
__device__ uint d_verifyKdTreeError;
// TODO create a __device__ function to handle the summation within a block.
__global__ void cuVerifyKdTree(const KdNode kdNodes[], const KdCoord coord[], sint g_sums[], refIdx_t nextRefs[], refIdx_t refs[], const sint num, const sint p, const sint dim) {
	const sint pos = threadIdx.x + blockIdx.x * blockDim.x;	// global thread index
	const sint tid = threadIdx.x;	// thread index within the block
	const sint numThreads = gridDim.x * blockDim.x;	// grid stride
	__shared__ sint s_sums[SHARED_SIZE_LIMIT];	// per-thread node counts for the block reduction
	sint myCount = 0;
	refIdx_t node;
	for (sint i = pos; i<num; i+=numThreads) {
		node = refs[i];
		if (node > -1) { // Is there a node here?
			myCount++; // Count the node.
			refIdx_t child = kdNodes[node].gtChild; // Save off the gt node
			nextRefs[i*2+1] = child; // Put the child in the refs array for the next loop
			if (child != -1) { // Check for proper comparison
				sint cmp = cuSuperKeyCompare(coord+kdNodes[child].tuple*dim, coord+kdNodes[node].tuple*dim, p, dim);
				if (cmp <= 0) { // gtChild .le. self is an error so indicate that.
					// nextRefs[i*2+1] = -node; // Overwrite the child with the error code
					d_verifyKdTreeError = 1; // and mark the error
				}
			}
			// now the less than side.
			child = kdNodes[node].ltChild;
			nextRefs[i*2] = child; // Put the child in the refs array for the next loop
			if (child != -1) {
				sint cmp = cuSuperKeyCompare(coord+kdNodes[child].tuple*dim, coord+kdNodes[node].tuple*dim, p, dim);
				if (cmp >= 0) { // ltChild .ge. self is an error so indicate that.
					// nextRefs[i*2] = -node; // Overwrite the child with the error code
					d_verifyKdTreeError = 1;
				}
			}
		} else {
			nextRefs[i*2] = -1; // If there was no node here, make sure the next level knows that
			nextRefs[i*2+1] = -1;
		}
	}
	s_sums[tid] = myCount;
	// Now sum up the number of nodes found using the standard Cuda reduction code.
	// NOTE(review): this assumes blockDim.x is a power of two and no larger
	// than SHARED_SIZE_LIMIT — confirm against the launch configuration.
	__syncthreads();
	for (sint s=blockDim.x/2; s>32; s>>=1)
	{
		if (tid < s)
			s_sums[tid] = myCount = myCount + s_sums[tid + s];
		__syncthreads();
	}
	if (tid<32)
	{
		// Fetch final intermediate sum from 2nd warp
		if (blockDim.x >= 64) myCount += s_sums[tid + 32];
		// Reduce final warp using shuffle (mask-less __shfl_down is the HIP form)
		for (sint offset = warpSize/2; offset > 0; offset /= 2)
		{
			myCount += __shfl_down(myCount, offset);
		}
	}
	if (tid == 0)
		g_sums[blockIdx.x] += myCount; // and save off this block's sum.
}
/*
* blockReduce is a kernel which adds sums all of the values in the g_sum array. The final
* sum is returned in g_sum[0].
* Inputs:
* g_sum[] pointer to the array to be summed
* N sint indicating number of values to be summed
* Output
* g_sum[0] contains the final sum.
*/
__global__ void blockReduce(sint g_sums[], sint N) {
	const sint numThreads = gridDim.x * blockDim.x;	// grid stride
	const sint tid = threadIdx.x;	// thread index within the block
	__shared__ sint s_sums[SHARED_SIZE_LIMIT];	// per-thread partial sums
	sint mySum = 0;
	// Read in the data to be summed; each thread accumulates a strided subset.
	for (sint i = tid; (i)<N; i+=numThreads) {
		mySum += g_sums[i];
	}
	s_sums[tid] = mySum;
	// Now sum up the number of nodes found using the standard Cuda reduction code.
	// NOTE(review): assumes blockDim.x is a power of two, <= SHARED_SIZE_LIMIT,
	// and that this kernel is launched with a single block so g_sums[0] holds
	// the final total — confirm against the call site.
	__syncthreads();
	for (uint s=blockDim.x/2; s>32; s>>=1)
	{
		if (tid < s)
			s_sums[tid] = mySum = mySum + s_sums[tid + s];
		__syncthreads();
	}
	if (tid < 32)
	{
		// Fetch final intermediate sum from 2nd warp
		if (blockDim.x >= 64) mySum += s_sums[tid + 32];
		// Reduce final warp using shuffle (mask-less __shfl_down is the HIP form)
		for (sint offset = warpSize/2; offset > 0; offset /= 2)
		{
			mySum += __shfl_down(mySum, offset);
		}
	}
	if (tid == 0)
		g_sums[blockIdx.x] = mySum; // Save off this block's sum.
}
/*
* verifyKdTreeGPU is a Gpu class method that sets up the data for the cuVerifyKdTree kernel and
* calls it once for each level of the kdTree. After each call it checks to see if the GPU kernel
*/
/*
 * Allocates the working storage used by verifyKdTreeGPU: two ping-pong
 * buffers for the node indices of the current/next tree level, and a zeroed
 * per-block partial-sum array for node counting.
 */
void Gpu::initVerifyKdTree() {
	#pragma omp critical (launchLock)
	{
		setDevice();
		// Each ping-pong buffer must hold up to 2*num references.
		for (int i = 0; i < 2; i++)
			checkCudaErrors(hipMalloc((void **)&d_midRefs[i], 2 * num * sizeof(refIdx_t)));
		// One partial-sum slot per block, cleared to zero.
		checkCudaErrors(hipMalloc((void **)&d_sums, numBlocks * sizeof(uint)));
		checkCudaErrors(hipMemset(d_sums, 0, numBlocks * sizeof(uint)));
	}
}
/*
 * Releases the working storage allocated by initVerifyKdTree after all
 * outstanding GPU work has drained.
 */
void Gpu::closeVerifyKdTree() {
	syncGPU();
	#pragma omp critical (launchLock)
	{
		setDevice();
		// Free the ping-pong reference buffers and the block-sum array.
		for (int i = 0; i < 2; i++)
			checkCudaErrors(hipFree(d_midRefs[i]));
		checkCudaErrors(hipFree(d_sums));
	}
}
/*
* verifyKdTreeGPU is a Gpu class method that sets up the data for the cuVerifyKdTree kernel and
* calls it once for each level of the kdTree. After each call it checks to see if the GPU kernel
* found an error and if so, some of the errors are printed and then the program exits.
* Inputs
* root index of the root node in the kdNodes array
* startP axis to start on. should always be less than dim
* dim number of dimensions
* numTuples number of KdNodes
*
* Return number of kdNodes found
*/
/*
 * Walks the kd-tree one level at a time, launching cuVerifyKdTree for each
 * level and checking the device-side error flag afterwards. On failure the
 * offending node indices are printed and -1 is returned; on success the total
 * node count (reduced from the per-block sums) is returned.
 * Fixes vs. the original: redundant "% 2 % 2", a leaked h_children buffer on
 * the error path, and missing stream syncs before reading async-copied data.
 */
sint Gpu::verifyKdTreeGPU(const sint root, const sint startP, const sint dim, const sint numTuples) {
	setDevice();
	const sint logNumTuples = int(floor(log2(float(numTuples))));
	// Put the root node in the children array for level 0
	checkCudaErrors(hipMemcpyAsync(d_midRefs[0], &root, sizeof(refIdx_t), hipMemcpyHostToDevice, stream));
	refIdx_t* nextChildren; // where the next level's children will be written
	refIdx_t* children;     // where the current level's nodes are read from
	// Clear the error flag in the GPU
	sint verifyKdTreeError = 0;
	hipMemcpyToSymbolAsync(d_verifyKdTreeError, &verifyKdTreeError,
			sizeof(verifyKdTreeError),
			0,hipMemcpyHostToDevice, stream);
	// Loop through the levels, ping-ponging between the two midRefs buffers.
	for (sint level = 0; level < logNumTuples+1; level++) {
		const sint p = (level+startP) % dim; // Calculate the primary axis for this level
		nextChildren = d_midRefs[(level+1) % 2];
		children = d_midRefs[level % 2]; // was "(level) % 2 % 2"; the second % 2 was redundant
		// Only start enough threads to cover the current level (2^level nodes).
		sint threadsNeeded = 1<<level;
		sint blocks;
		// Calculate the right thread and block numbers
		if (threadsNeeded > numThreads){
			blocks = threadsNeeded/numThreads;
			if (blocks > numBlocks) blocks = numBlocks;
		} else {
			blocks = 1;
		}
		#pragma omp critical (launchLock)
		{
			setDevice();
			hipLaunchKernelGGL(( cuVerifyKdTree), dim3(blocks),dim3(numThreads), 0, stream, d_kdNodes,
					d_coord,
					d_sums,
					nextChildren,
					children,
					(1<<level), p, dim);
			checkCudaErrors(hipGetLastError());
		}
		// Check for error on the last run
		hipMemcpyFromSymbolAsync(&verifyKdTreeError,
				d_verifyKdTreeError,
				sizeof(verifyKdTreeError),
				0,hipMemcpyDeviceToHost, stream);
		syncGPU(); // Wait
		if (verifyKdTreeError != 0){ // See if the kernel for this level found an error
			cout << "Verify Tree Error at level " << level << endl;
			// Here is where we get the data back from the GPU and find the node with the error
			refIdx_t* h_children = new refIdx_t[2<<level];
			checkCudaErrors(hipMemcpyAsync(h_children, nextChildren, (2<<level)*sizeof(refIdx_t), hipMemcpyDeviceToHost, stream));
			syncGPU(); // make sure the copy has landed before reading h_children
			cout << "First 10 nodes in error are ";
			sint cnt = 0;
			sint all = 2 << level;
			for (sint i = 0; i < all; i++){
				if (h_children[i] < 0) // Is it a failure?
					if (cnt++ < 10) // Only print the first ten failures.
						cout << "[" << i << "]" << -h_children[i] << " ";
			}
			cout << endl << "Total of " << cnt << " bad nodes found" << "out of " << all << endl;
			delete[] h_children; // was leaked on this early-return path
			return -1;
		}
	}
	// Finally, add the sums of all the blocks together and return the final count.
	#pragma omp critical (launchLock)
	{
		setDevice();
		hipLaunchKernelGGL(( blockReduce), dim3(1),dim3(numThreads), 0, stream, d_sums, numBlocks);
		checkCudaErrors(hipGetLastError());
	}
	sint numNodes;
	checkCudaErrors(hipMemcpyAsync(&numNodes, d_sums, sizeof(numNodes), hipMemcpyDeviceToHost, stream));
	syncGPU(); // ensure the async copy has completed before numNodes is read
	return numNodes;
}
/*
* verifyKdTree is a static Gpu class method that set up and calls the verifyKdTreeGPU method
* on each GPU.
* Inputs
* kdNodes[] Pointer the the cpu copy of the kdNodes array. Currently unused
* root index of the root node in the kdNodes array
* dim number of dimensions
* numTuples number of KdNodes
*
* Return Total number of kdNodes found
*/
sint Gpu::verifyKdTree(KdNode kdNodes[], const sint root, const sint dim, const sint numTuples) {
	// Set up memory for the verify tree functions
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) gpus[gpuCnt]->initVerifyKdTree();
	sint nodeCnts[numGPUs]; // to store the per gpu node counts.
	if (numGPUs==2) {
		// Each GPU verifies its own subtree: the roots are the two children of
		// the host-side first node and partitioning started on axis 1.
		#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			refIdx_t l_root = gpuCnt==0 ? gpu1stNode.ltChild : gpu1stNode.gtChild;
			nodeCnts[gpuCnt] = gpus[gpuCnt]->verifyKdTreeGPU(l_root, 1, dim, gpus[gpuCnt]->num);
		}
	} else {
		nodeCnts[0] = gpus[0]->verifyKdTreeGPU(root, 0, dim, numTuples);
	}
	// In the 2-GPU case the host-side first node is not counted by either GPU,
	// so the total starts at 1.
	int nodeCnt = numGPUs == 2 ? 1 : 0;
	for (int i = 0; i<numGPUs; i++) nodeCnt += nodeCnts[i];
	// free the memory used for verifying the tree.
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) gpus[gpuCnt]->closeVerifyKdTree();
	return nodeCnt;
}
/*
* getKdNodesFromGPU is a Gpu class method that copies the GPU version of the KdNodes array to the host.
* Inputs
* numTuples size of kdNodes array to copy
* Output
* kdNodes[] pointer to the place where the kdNodes will be copied
*/
/*
 * Copies the device-side KdNode array back to a host buffer. Both a host
 * destination and a resident device copy are required; otherwise exits.
 */
void Gpu::getKdNodesFromGPU(KdNode kdNodes[], const sint numTuples){
	setDevice();
	if (kdNodes == NULL || d_kdNodes == NULL) {
		// Report whichever side is missing, then abort.
		if (kdNodes == NULL)
			cout << "getKdNodesFromGPU Error: Don't know where to put the kdNodes" << endl;
		if (d_kdNodes == NULL)
			cout << "getKdNodesFromGPU Error: GPU copy of kdNodes is not available" << endl;
		exit(1);
	}
	// Stream-ordered device-to-host copy of the KdNode array.
	checkCudaErrors(hipMemcpyAsync(kdNodes, d_kdNodes, (numTuples)*sizeof(KdNode), hipMemcpyDeviceToHost, stream));
}
/*
* getKdTreeResults is a static Gpu class method that copies the KdNoes data and coordinate data
* from all GPUs to a local copy. Data from each GPU is concatenated into a single array so in
* the two GPU case, the returned indices need to be fixed.
* Inputs
* numTuples size of kdNodes array to copy
* Output
* kdNodes[] Host KdNodes array where data from the GPU should be put
* coord[] Host coordinate array where data from the GPU should be put
* * This is only used for the 2 GPU case where coordinate data
* may get reordered
*/
void Gpu::getKdTreeResults(KdNode kdNodes[], KdCoord coord[], const sint numTuples, const sint dim) {
	// Copy the kdNodes array back; each GPU's nodes land in its own slice of the host array.
	int numPerGPU = numTuples/numGPUs;
	if (kdNodes != NULL ){
		#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			gpus[gpuCnt]->getKdNodesFromGPU(kdNodes + gpuCnt*numPerGPU, numPerGPU);
		}
		if (numGPUs == 2) { // Fix the ref indices for the upper part of the array.
			// copy the GPU first node to the host
			kdNodes[firstNode].ltChild = gpu1stNode.ltChild;
			kdNodes[firstNode].gtChild = gpu1stNode.gtChild + numPerGPU;
			kdNodes[firstNode].tuple = gpu1stNode.tuple;
			// Indices from GPU 1 are relative to its own slice; offset them so
			// they index into the concatenated host arrays.
			#pragma omp parallel for
			for (int i = numPerGPU; i<numTuples; i++) {
				if (kdNodes[i].ltChild >= 0) kdNodes[i].ltChild += numPerGPU;
				if (kdNodes[i].gtChild >= 0) kdNodes[i].gtChild += numPerGPU;
				if (kdNodes[i].tuple >= 0) kdNodes[i].tuple += numPerGPU;
			}
			if (coord != NULL ){ // If there are 2 GPUs, the coordinates need to be copied back
				#pragma omp parallel for
				for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
					gpus[gpuCnt]->getCoordinatesFromGPU(coord + gpuCnt*numPerGPU*dim, numPerGPU, dim);
				}
			} else {
				cout << "getKdTreeResults Error: Don't know where to put the coordinates" << endl;
				exit(1);
			}
		}
	} else {
		cout << "getKdTreeResults Error: Don't know where to put the kdNodes" << endl;
		exit(1);
	}
}
/*
 * Returns true when the device described by pProp can participate in
 * peer-to-peer transfers.
 */
inline bool IsGPUCapableP2P(hipDeviceProp_t *pProp)
{
#ifdef _WIN32
	// On Windows, P2P requires the TCC driver model.
	return pProp->tccDriver != 0;
#else
	// Elsewhere, any compute-capability 2.0 (Fermi-class) or newer device qualifies.
	return pProp->major >= 2;
#endif
}
/*
* gpuSetup is a static class method that determines what GPUs are available and whether or
* not they are capable of UVM which is required for multi-GPU sort. It then creates an
* instance of the GPU class for each GPU to be used.
* Inputs
* gpu_max Maximum nmber of GPUs to apply. (cannot be greater than 2 for this version)
* threads Maximum number of threads to use on partitioning
* blocks Macimum number of blocks to use on partitioning
*/
void Gpu::gpuSetup(int gpu_max, int threads, int blocks, int dim){
	// Number of GPUs
	printf("Checking for multiple GPUs...\n");
	sint gpu_n;
	checkCudaErrors(hipGetDeviceCount(&gpu_n));
	printf("CUDA-capable device count: %i\n", gpu_n);
	// Cap the device count at what the caller requested.
	gpu_n = min(gpu_n,gpu_max);
	// Query device properties
	hipDeviceProp_t prop[MAX_GPUS];
	int gpuid[MAX_GPUS]; // We want to find the first two GPU's that can support P2P
	int gpu_count = 0; // GPUs that meet the criteria
	for (int i=0; i < gpu_n; i++)
	{
		checkCudaErrors(hipGetDeviceProperties(&prop[i], i));
		// Only boards based on Fermi (compute capability >= 2.0) can support P2P
		if ((prop[i].major >= 2))
		{
			// This is an array of P2P capable GPUs
			gpuid[gpu_count++] = i;
		}
		printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
	}
	if (gpu_count >= 2) {
		// Check possibility for peer access
		printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
		int can_access_peer_0_1, can_access_peer_1_0;
		// In this case we just pick the first two that we can support
		checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer_0_1, gpuid[0], gpuid[1]));
		checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer_1_0, gpuid[1], gpuid[0]));
		// Output results from P2P capabilities
		printf("> Peer-to-Peer (P2P) access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[0]].name, gpuid[0],
				prop[gpuid[1]].name, gpuid[1] ,
				can_access_peer_0_1 ? "Yes" : "No");
		printf("> Peer-to-Peer (P2P) access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[1]].name, gpuid[1],
				prop[gpuid[0]].name, gpuid[0],
				can_access_peer_1_0 ? "Yes" : "No");
		if (can_access_peer_0_1 == 0 || can_access_peer_1_0 == 0)
		{
			// P2P must work in both directions; otherwise fall back to one GPU.
			printf("Peer to Peer access is not available between GPU%d <-> GPU%d, waiving test.\n", gpuid[0], gpuid[1]);
			checkCudaErrors(hipSetDevice(gpuid[0]));
			gpu_n = 1;
		} else {
			// Enable peer access in both directions
			printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]);
			checkCudaErrors(hipSetDevice(gpuid[0]));
			checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[1], 0));
			checkCudaErrors(hipSetDevice(gpuid[1]));
			checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[0], 0));
			// Check that we got UVA on both devices; the multi-GPU sort needs it.
			printf("Checking GPU%d and GPU%d for UVA capabilities...\n", gpuid[0], gpuid[1]);
			const bool has_uva = (prop[gpuid[0]].unifiedAddressing && prop[gpuid[1]].unifiedAddressing);
			printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid[0]].name, gpuid[0], (prop[gpuid[0]].unifiedAddressing ? "Yes" : "No"));
			printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid[1]].name, gpuid[1], (prop[gpuid[1]].unifiedAddressing ? "Yes" : "No"));
			if (has_uva)
			{
				printf("Both GPUs can support UVA, enabling...\n");
			}
			else
			{
				// At least one device lacks UVA; fall back to single-GPU mode.
				printf("At least one of the two GPUs does NOT support UVA.\n");
				gpu_n = 1;
			}
		}
	} else {
		gpu_n = 1;
	}
#ifdef FAKE_TWO
	// Debug mode: create two Gpu objects that both target device gpuid[0].
	cout << "Faking 2 GPUs." << endl;
	gpu_n = setNumGPUs(2);
	for (int i = 0; i<gpu_n; i++){
		gpus[i] = new Gpu(threads, blocks, gpuid[0], dim);
	}
#else
	// Create one Gpu object per usable device.
	gpu_n = setNumGPUs(gpu_n);
	for (int i = 0; i<gpu_n; i++){
		gpus[i] = new Gpu(threads, blocks, gpuid[i], dim);
	}
#endif
}
| cf96b692a6787666b9588d6cd814a46f973bec42.cu | //
// Gpu.cu
// This file contains the definition of the Gpu class which provides the GPU
// API for the GPU functions required to build the Kd tree.
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/********************************************************************************
/* DEBUG defines
/********************************************************************************/
//#define FAKE_TWO // runs the Multi-GPU code on a single GPU
#include <limits>
#include <vector>
#include <cuda_runtime.h>
#include <omp.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h>  // Helper for shared that are common to CUDA Samples
#include "Gpu.h"
#include "mergeSort_common.h"
#include "removeDups_common.h"
#include "buildKdTree_common.h"
// Static Gpu class state shared by all per-device Gpu instances.
sint Gpu::numGPUs = 0;            // number of GPUs in use (set via setNumGPUs)
Gpu* Gpu::gpus[MAX_GPUS] = {NULL};  // one Gpu object per device in use
refIdx_t Gpu::firstNode;          // index of the root KdNode of the overall tree
KdNode Gpu::gpu1stNode;           // root node joining the two per-GPU subtrees in the 2-GPU case
/*
* cuSuperKeyCompare function performs the compare between coordinates used in the sorting and partitioning
* functions. It starts by subtracting the primary coordinate or the pth coordinate and proceeds through each
* of the coordinates until it finds the non-zero difference. That difference is returned as the compare result.
* Inputs
* a[] Pointer to the first coordinate
* b[] Pointer to the second coordinate
* p index to the primary coordinate to compare. p must be less than dim
* dim Number of dimensions the coordinate has.
*
* Returns a long that is positive if a>b, 0 if equal an negative is a<b
*/
__device__ long cuSuperKeyCompare(const KdCoord a[], const KdCoord b[], const sint p, const sint dim)
{
	// Compare two "super keys": walk the coordinates cyclically starting at
	// axis p and return the first non-zero component difference.
	// Result is >0 when a>b, 0 when equal, <0 when a<b.  p must be < dim.
	KdCoord delta = 0;
	for (sint i = 0; i < dim; i++) {
		sint axis = i + p;
		if (axis >= dim)
			axis -= dim;
		delta = a[axis] - b[axis];
		if (delta != 0)
			return delta;
	}
	return delta;
}
/*
* cuSuperKeyCompareFirstDim is a GPU function that performs the same function as cuSuperKeyCompare.
* But in the case where the calling code has pre-fetched the first dimension or component, the this takes
* the A and B components as L values and only access the array values if the first components happen to be equal.
* Inputs
* ap first compare component l value
* bp first compare component l value
* *a a coordinates
* *b b coordinates
* p index of the first
* dim number of dimensions the coordinates have
*
* Returns a long that is positive if a>b, 0 if equal an negative is a<b
*/
__device__ long cuSuperKeyCompareFirstDim(const KdCoord ap, const KdCoord bp, const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
	// Variant of cuSuperKeyCompare for callers that already fetched the
	// primary components (ap/bp); the coordinate arrays are only read when
	// the primary components tie.
	KdCoord delta = ap - bp;
	if (delta != 0)
		return delta;
	for (sint i = 1; i < dim; i++) {
		sint axis = i + p;
		if (axis >= dim)
			axis -= dim;
		delta = a[axis] - b[axis];
		if (delta != 0)
			break;
	}
	return delta;
}
/*
* cuInitializeKnodesArray is a GPU kernel that initializes the array of KdNodes that will eventually
* be the kdNode tree. Initialization include copying the coordinates from the coordinates array and
* initing the child node indices to -1 which is the terminal node indicator
* Inputs
* kdNodes Pointer to the array of uninitialized kd nodes.
* coordinates Pointer the the array coordinates.
* numTuples number of coordinates and kd nodes.
* dim dimension of the coordinates
* numTotalThreads number of threads being used to do the initing
*/
__global__ void cuInitializeKdNodesArray(KdNode kdNodes[], KdCoord coordinates[], const sint numTuples, const sint dim, sint numTotalThreads){
	// Initialize every KdNode: point it at its own tuple index and mark both
	// children as terminal (-1).  Threads stride by the caller-supplied total
	// thread count.  (coordinates/dim are currently unused by this kernel;
	// the coordinate data stays in the separate array that .tuple indexes.)
	const sint first = threadIdx.x + blockIdx.x * blockDim.x;
	for (sint node = first; node < numTuples; node += numTotalThreads) {
		KdNode* kn = &kdNodes[node];
		kn->tuple = node;
		kn->ltChild = -1;
		kn->gtChild = -1;
	}
}
/*
* cuCupyCoordinate is a GPU kernel that copies the pth coordinate from the coordinate array into a 1 dimensional array.
* The copy is done suche that the value is at the same index as the reference in the d_ref array.
* Inputs
* coord[] pointer to the coordinates array.
* d_sval[] pointer to the value array that the data is to be copied too.
* d_ref[] pointer to reference array that it is should match
* p sint indicating the coordinate to copy
* dim sint indicating dimensions
* numTuples sint indicating number of values to copy
*/
__global__ void cuCopyCoordinate(const KdCoord coord[], KdCoord d_sval[], const refIdx_t d_ref[], const sint p, const sint dim, const sint numTuples){
	// Gather the p-th component of each referenced tuple into d_sval so that
	// d_sval[j] holds the sort key for reference d_ref[j].
	const sint step = blockDim.x * gridDim.x;
	sint j = threadIdx.x + blockIdx.x * blockDim.x;
	while (j < numTuples) {
		d_sval[j] = coord[d_ref[j] * dim + p];
		j += step;
	}
}
/*
* initializeKdNodesArrayGPU is a Gpu class method that allocated the KdNode array in the GPU and calls the cuInitilizeKdNodesArray.
* It also copies the coordinates data over to the gpu if it's not there already.
* Inputs
* coordinates[] Pointer to the coordinate data the to be put in the kdTree. Can be null if the GPU already has the coordinates
* numTuples sint indicating number of coordinates and the number of kdNodes to be created
* dim number of dimensions.
*/
void Gpu::initializeKdNodesArrayGPU(const KdCoord coordinates[], const sint numTuples, const sint dim){
	// Allocate this device's coordinate and KdNode arrays, copy numTuples
	// coordinates over, append one sentinel tuple of all-max values, and
	// launch the kernel that initializes the KdNode array.
	// Inputs
	//   coordinates  host coordinate data (numTuples * dim KdCoords)
	//   numTuples    number of coordinates (and KdNodes) for this GPU
	//   dim          dimensionality of each coordinate
	// Exits the program if either device array was already allocated.
	// Make this whole function critical so that memory allocations will not fail.
#pragma omp critical (launchLock)
	{
		setDevice();
		// First allocate memory for the coordinate array and copy it to the device.
		// Size with sizeof(KdCoord), not sizeof(int), so the allocation stays
		// correct if KdCoord is ever retyped; one extra tuple holds the sentinel.
		if (d_coord == NULL) {
			checkCudaErrors(cudaMalloc((void **) &d_coord, (numTuples+1)*sizeof(KdCoord)*dim));
			checkCudaErrors(cudaMemcpyAsync(d_coord, coordinates, numTuples*sizeof(KdCoord)*dim, cudaMemcpyHostToDevice, stream));
		} else {
			cout << "initializeKdNodesArrayGPU Error: coordinate array already allocated" << endl;
			exit(1);
		}
		// Add an extra tuple at the end with all max values.  A std::vector
		// replaces the original variable-length array, which is not standard
		// C++.  cudaMemcpyAsync from pageable host memory returns only after
		// the source has been staged, so tmp may safely go out of scope.
		std::vector<KdCoord> tmp(dim, std::numeric_limits<KdCoord>::max());
		checkCudaErrors(cudaMemcpyAsync(d_coord+numTuples*dim, tmp.data(), sizeof(KdCoord)*dim, cudaMemcpyHostToDevice, stream));
		// Then allocate the kdNode Array
		if(d_kdNodes == NULL) {
			checkCudaErrors(cudaMalloc((void **) &d_kdNodes, numTuples*sizeof(KdNode)));
		} else {
			cout << "InitialzeKdNode Error: kdNodes array already allocated" << endl;
			exit(1);
		}
		// Call the init routine
		cuInitializeKdNodesArray<<<numBlocks, numThreads, 0, stream>>>(d_kdNodes, d_coord, numTuples, dim, numThreads*numBlocks);
		checkCudaErrors(cudaGetLastError());
	}
}
/*
* initializeKdNodesArray is a Gpu class static method that allocated amd initializes the KdNode array
* in all of the GPUs. It also copies a portion of coordinates data over to each gpu.
* Inputs
* kdNodes[] pointer to a cpu side kdNodes array. If not null, the GPU side kdNodes array is copied to it.
* Normally this should be null
* coordinates[] Pointer to the CPU coordinate data.
* numTuples sint indicating number of coordinates and the number of kdNodes to be created
* dim number of dimensions.
*/
void Gpu::initializeKdNodesArray(KdCoord coordinates[], const sint numTuples, const sint dim){
	// Split the host coordinate array evenly across all GPUs and have each
	// GPU allocate and initialize its share of the kd-tree data in parallel.
	if (coordinates == NULL) {
		cout << "initializeKdNodesArray Error: Expecting coordinates data to send to GPU" << endl;
		exit(1);
	}
#pragma omp parallel for
	for (int g = 0; g < numGPUs; g++) {
		KdCoord* slice = coordinates + g * dim * numTuples / numGPUs;
		gpus[g]->initializeKdNodesArrayGPU(slice, numTuples / numGPUs, dim);
	}
}
/*
* cuFillMem is a Gpu class method that fills a portion of memory with a constant value.
* Inputs
* d_pntr pointer to where the fill should start
* val value to fill with
* numTuples number of reference array entries
*/
__global__ void cuFillMem(uint d_pntr[], uint val, int num){
	// Device-side fill: write val into num consecutive words starting at d_pntr.
	const sint step = blockDim.x * gridDim.x;
	for (sint i = threadIdx.x + blockIdx.x * blockDim.x; i < num; i += step) {
		d_pntr[i] = val;
	}
}
/*
* fillMemGPU is a Gpu class method that fills a portion of memory with a constant value.
* It calls the cuFillMem kernel to do so.
* Inputs
* d_pntr pointer to where the fill should start
* val value to fill with
* numTuples number of reference array entries
*/
void Gpu::fillMemGPU(uint* d_pntr, const uint val, const uint num) {
	// Fill num words of device memory starting at d_pntr with val by
	// launching cuFillMem on this GPU's stream.
	// Inputs
	//   d_pntr  device pointer to the start of the region; must be non-NULL
	//   val     value to store in every word
	//   num     number of words to fill
	setDevice();
	// BUG FIX: the test was inverted — the original launched the kernel when
	// d_pntr was NULL and exited with an error on every valid pointer.
	if (d_pntr != NULL) {
		cuFillMem<<<numBlocks,numThreads, 0, stream>>>(d_pntr, val, num);
		checkCudaErrors(cudaGetLastError());
	} else {
		cout << "fillMemGPU Error: device pointer is null" << endl;
		exit(1);
	}
}
void Gpu::fillMemGPU(sint* d_pntr, const sint val, const uint num) {
	// Signed overload of fillMemGPU: fills num words at d_pntr with val.
	// The buffer is reinterpreted as unsigned for the kernel; the bit
	// pattern written is identical.
	setDevice();
	// BUG FIX: the test was inverted — the original launched the kernel when
	// d_pntr was NULL and exited with an error on every valid pointer
	// (mergeSort calls this with a valid d_references pointer).
	if (d_pntr != NULL) {
		cuFillMem<<<numBlocks,numThreads, 0, stream>>>((uint*)d_pntr, val, num);
		checkCudaErrors(cudaGetLastError());
	} else {
		cout << "fillMemGPU Error: device pointer is null" << endl;
		exit(1);
	}
}
/*
* cuInitializeRefernece is a GPU kernel that initializes the reference arrays used in sorting and partitioning functions
* Each array value is set to its index.
* Inputs
* reference[] Pointer to the reference array to be initialized
* numTuples Integer indicating the number of elements to init.
*/
__global__ void cuInitializeReference(refIdx_t reference[], sint numTuples){
	// Identity-initialize a reference array: reference[j] = j for all j.
	const sint step = blockDim.x * gridDim.x;
	sint j = threadIdx.x + blockIdx.x * blockDim.x;
	while (j < numTuples) {
		reference[j] = (refIdx_t)j;
		j += step;
	}
}
/*
* initializeReferenceGPU is a Gpu class method that allocates the reference arrays in the GPU and then calls
* cuInitializeReference kernel to init the arrays. dim+2 arrays are allocated but on dim arrays are initialized
* Inputs
* numTuples number of reference array entries
* dim number of dimensions
* Outputs
* references pointer to an array of pointers of CPU side reference arrays. If not null
the initialize arrays are copied back. Normally this should be null.
*/
void Gpu::initializeReferenceGPU(const sint numTuples, const sint p, const sint dim) {
	// Lazily allocate all dim+2 reference arrays on this device, then fill
	// d_references[p] with the identity permutation 0..numTuples-1.
	setDevice();
	for (sint idx = 0; idx < dim + 2; idx++) {
		if (d_references[idx] != NULL)
			continue;
		// Array not allocated yet; create it on the GPU.
		checkCudaErrors(cudaMalloc((void **) &d_references[idx], numTuples*sizeof(uint)));
	}
	// Kernel launches are serialized across host threads via launchLock.
#pragma omp critical (launchLock)
	{
		setDevice();
		cuInitializeReference<<<numBlocks,numThreads, 0, stream>>>(d_references[p], numTuples);
		checkCudaErrors(cudaGetLastError());
	}
}
/*
* getReferenceFromGPU is a Gpu class method that copies one of the gpu reference arrays
* bach to the CPU.
* Inputs
* numTuples number of reference array entries
* p indicates to copy the data from d_reference[p] array.
* Outputs
* reference pointer to a reference array on the CPU side.
*/
void Gpu::getReferenceFromGPU(refIdx_t* reference, const sint p, uint numTuples){
	// Copy the device-side d_references[p] array back to host memory.
	// A NULL destination is silently ignored.
	setDevice();
	if (reference == NULL)
		return;
	checkCudaErrors(cudaMemcpyAsync(reference, d_references[p], numTuples*sizeof(refIdx_t), cudaMemcpyDeviceToHost, stream));
}
/*
* getCoordinatesFromGPU is a Gpu class method that copies coordinate arrays
* bach to the CPU.
* Inputs
* numTuples number of tuples in the coordinate
* dim number of dimensions in a tuple.
* Outputs
* coord pointer to a coordinate array on the CPU side.
*/
void Gpu::getCoordinatesFromGPU(KdCoord* coord, const uint numTuples, const sint dim){
	// Copy numTuples*dim coordinates from this device back to host memory.
	// A NULL destination is silently ignored.
	setDevice();
	if (coord == NULL)
		return;
	checkCudaErrors(cudaMemcpyAsync(coord, d_coord, dim *numTuples*sizeof(KdCoord), cudaMemcpyDeviceToHost, stream));
}
/*
* mergeSortRangeGPU is a Gpu class method that performs the sort on one dimension and in one GPU.
* Inputs
* start integer offset into the references[from] array where of where to start the sort
* num number of elements to sort
* from index of the references array to sort from
* to index of the references array to sort to
* p primary coordinate on which the sort will occur
* dim number of dimensions
* Output
*/
void Gpu::mergeSortRangeGPU(const sint start, const sint num, const sint from, const sint to, const sint p, const sint dim){
	// Sort num references on axis p on this GPU: lazily allocate the value
	// arrays, gather the sort keys for the input references, then run the
	// GPU merge sort from d_references[from] into d_references[to].
	setDevice();
	// First check that memory on the GPU has been allocated
	if (d_coord == NULL || d_references[from] == NULL || d_references[to] == NULL) {
		cout << "mergeSortRangeGPU Error: coordinates or references for are null" << endl;
		exit(1);
	}
	// Set up refVal and tmpVal arrays; launchLock serializes allocation and
	// kernel launch across the per-GPU host threads.
#pragma omp critical (launchLock)
	{
		setDevice();
		if (d_values[from] == NULL){
			checkCudaErrors(cudaMalloc((void **) &d_values[from], num*sizeof(KdCoord)));
			// Copy the coordinate of interest to the refVal array.
			// NOTE(review): `from` is passed as the axis argument here; all
			// visible callers invoke this with from == p, so the gathered
			// key matches the sort axis — confirm if new callers differ.
			cuCopyCoordinate<<<numBlocks,numThreads, 0, stream>>>(d_coord+start*dim, d_values[from], d_references[from], from, dim, num);
		}
		if (d_values[to] == NULL)
			checkCudaErrors(cudaMalloc((void **) &d_values[to], num*sizeof(KdCoord)));
		checkCudaErrors(cudaGetLastError());
	}
	// Sort ascending (direction = 1) on axis p.
	mergeSortSmpl(d_coord, // coordinate array
			d_values[to], d_references[to]+start, // output arrays
			d_iVal, d_iRef, // Intermediate arrays
			d_values[from], d_references[from]+start, // Input arrays
			num, 1, p, dim // sizes and directions
	);
}
/*
* removeDuplicatesGPU is a Gpu class method that performs the duplicates removal for one of
* the dimensions of the coordinates.
* Inputs
* start integer offset into the references[from] array where of where to start the removal
* num number of elements to check for removal
* from index of the references array to remove from
* to index of the references arry to put the results
* p primary coordinate on which the removal will occur
* dim number of dimensions
* otherGpu pointer to another GPU. This function will compare the last tuple in that
* GPU to the first tuple in this one.
* otherNum number of tuples in the other GPU.
* Output
*/
sint Gpu::removeDuplicatesGPU(const sint start, const sint num, const sint from, const sint to,
		const sint p, const sint dim, Gpu* otherGpu, sint otherNum){
	// Remove duplicate references on axis p, reading d_references[from] and
	// writing the compacted result to d_references[to].  When otherGpu is
	// non-NULL, the last tuple on that GPU is also compared against the
	// first tuple here so duplicates across the GPU boundary are caught.
	// Returns the number of references remaining (as reported by removeDups).
	// NOTE(review): unlike the sibling methods this one does not call
	// setDevice() first — confirm removeDups selects the device itself.
	if (d_values[from] == NULL || d_values[to] == NULL) {
		cout << "values[from] or values[to] pointer is NULL" << endl;
		exit(1);
	}
	if (d_references[from] == NULL || d_references[to] == NULL) {
		cout << "references[from] or references[to] pointer is NULL" << endl;
		exit(1);
	}
	// Get the pointers to the data in the other GPU if required. This is to
	// remove duplicates across GPU boundaries; otherRef points at the LAST
	// reference on the other GPU.
	refIdx_t* otherRef = (otherGpu == NULL) ? NULL : otherGpu->d_references[from]+otherNum-1;
	KdCoord* otherCoord = (otherGpu == NULL) ? NULL : otherGpu->d_coord;
	sint end = removeDups(d_coord+start*dim, // Coordinate array
			d_values[to], d_references[to]+start, // Output arrays
			d_iVal, d_iRef, // Intermediate arrays
			d_values[from], d_references[from]+start, // Input arrays
			otherCoord, otherRef, // Pointers to data in the other GPU
			p, dim, num, // sizes
			numBlocks*numThreads // threads
	);
	return end;
}
/*
* copyRefValGPU is a Gpu class method that copies the contents of references[from] to the
* references[to] array. Likewise for the values arrays.
* Note that if the references[to] or the values[to] array pointer is null,
* the arrays will be allocated
* Inputs
* start integer offset into the references[from] array where of where to start the copy
* num number of elements to check for removal
* from index of the references array to remove from
* to index of the references array to put the results
* Output
*/
void Gpu::copyRefValGPU(sint start, sint num, sint from, sint to) {
	// Copy num entries of d_references[from] and d_values[from] into the
	// corresponding [to] arrays, allocating the destinations if needed.
	setDevice();
	// Check for NULL pointers
	if (d_values[from] == NULL) {
		cout << "copyRefValGPU Error: values[from] pointer is NULL" << endl;
		exit(1);
	}
	if (d_references[from] == NULL) {
		cout << "copyRefValGPU Error: references[from] pointer is NULL" << endl;
		exit(1);
	}
	if (d_values[to] == NULL)
		checkCudaErrors(cudaMalloc((void **) &d_values[to], num*sizeof(KdCoord)));
	if (d_references[to] == NULL)
		checkCudaErrors(cudaMalloc((void **) &d_references[to], num*sizeof(refIdx_t)));
	// Call the copy function.
	// NOTE(review): the references are offset by start but d_values[to] is
	// not — looks asymmetric; confirm intended (callers use start == 0).
	copyRefVal(d_values[to], d_references[to]+start,
			d_values[from], d_references[from]+start,
			num, numBlocks*numThreads);
}
/*
* copyRefGPU is a Gpu class method that copies the contents of references[from] to the
* references[to] array.
* Note that if the references[to] array pointer is null, the arrays will be allocated
* Inputs
* start integer offset into the references[from] array where of where to start the removal
* num number of elements to copy
* from index of the references array to copy from
* to index of the references array to put the results
* Output
*/
void Gpu::copyRefGPU(sint start, sint num, sint from, sint to) {
	// Copy num entries of d_references[from] (starting at offset start)
	// into d_references[to], allocating the destination if needed.
	this->setDevice();
	// Check for NULL pointers
	if (d_references[from] == NULL) {
		cout << "copyRefGPU Error: references[from] pointer is NULL" << endl;
		exit(1);
	}
	if (d_references[to] == NULL)
		checkCudaErrors(cudaMalloc((void **) &d_references[to], num*sizeof(refIdx_t)));
	// Call the copy function
	copyRef(d_references[to]+start,
			d_references[from]+start,
			num, numBlocks*numThreads);
}
/*
* balancedSwapGPU is a Gpu class method that is a wrapper around the balancedSwap function in mergSort.cu
* This function uses just one of the GPUs to swap coordinate data between GPUs such that all of the tuples
* in the is GPU is less than the tuples in the other GPU. It is a component of the multi GPU sort function.
* Inputs
* start integer offset into the references[from] array where of where to start the removal
* num number of elements to check for swap
* from index of the references array to swap from. Results remain in the reference[from]
* p primary coordinate on which the swap compare will occur
* dim number of dimensions
* otherGPU pointer to another GPU.
* Return
* pivot the index into the reference array on the other GPU below which were swapped with this GPU.
*/
sint Gpu::balancedSwapGPU(sint start, sint num, sint from, sint p, sint dim, Gpu* otherGpu){
	// Thin wrapper around balancedSwap(): exchanges tuples with otherGpu so
	// that every tuple held by this GPU compares below every tuple held by
	// the other GPU (axis p, ascending).  Returns the pivot index reported
	// by balancedSwap.
	setDevice();
	KdCoord*  myVals    = this->d_values[from];
	refIdx_t* myRefs    = this->d_references[from] + start;
	KdCoord*  theirVals = otherGpu->d_values[from];
	refIdx_t* theirRefs = otherGpu->d_references[from] + start;
	return balancedSwap(this->d_coord, myVals, myRefs,
			otherGpu->d_coord, theirVals, theirRefs,
			1, p, dim, num, numBlocks*numThreads);
}
/*
* swapMergeGPU is a Gpu class method that is a wrapper around the mergeSwap function in mergSort.cu
* After the BalancedSwap function exchanges the coordinates between the two GPU, there remains
* two independently sorted arrays in each GPU. This function merges those into a single sorted array.
* Inputs
* start integer offset into the references[from] array where of where start of the lower sorted data
* num number of elements to check for swap
* from index of the references array to merge from.
* to index of the references array to merge to.
* mergePoint index of the start of the upper sorted data.
* p primary coordinate on which the merge compare will occur
* dim number of dimensions
* Output
*/
void Gpu::swapMergeGPU(sint start, sint num, sint from, sint to, sint mergePoint, const sint p, const sint dim){
	// Thin wrapper around mergeSwap(): merges the two independently sorted
	// runs left behind by balancedSwapGPU (split at mergePoint) into one
	// sorted sequence, from the [from] arrays into the [to] arrays.
	// (start is currently unused; the merge always begins at offset 0.)
	setDevice();
	mergeSwap(d_coord,
			d_values[from], d_references[from],
			d_values[to], d_references[to],
			mergePoint, p, dim, num, numBlocks*numThreads);
}
/*
* mergeSort is a static Gpu class method that performs the sort and the duplicates removal for all of
* the dimensions across all GPUs.
* If there is 1 gpu, this function does a simple loop through the dimensions, first sorting then
* removing duplicated.
* If there are 2 gpus, this function, does a sort of the first dimension on the coordinates in
* each GPU. the gpus each halve half of the coordinates. Then it swaps the necessary tuples
* between GPUs such all of the tuples in gpu 0 are less than all of the tupes in gpu 1. Finally
* it copies the references from dimension 1 into each reference arrys of the other dimensions,
* sorts those arrays and does a duplicate removal.
* Inputs
* numTuples number of input coordinates and references
* dim number of dimensions
* Output
* end[] pointer to an array continuing the number of references after duplicate removal.
* This method writes to the pth entry in that array.
*/
void Gpu::mergeSort(sint end[], const sint numTuples, const sint dim){
	// Sort the reference arrays on every axis across all GPUs and remove
	// duplicate tuples.  Single-GPU: a per-axis sort + dedup loop.
	// Multi-GPU: sort axis 0 per GPU, exchange tuples so GPU0 holds the
	// lower half, merge, dedup, then derive the remaining axes from axis 0.
	// Outputs: end[p] = references remaining on axis p after dedup (-1
	// propagated on error); Gpu::firstNode is set to the overall median
	// tuple in the multi-GPU case.
	int gpuEnds[numGPUs][dim];        // per-GPU post-dedup counts (variable-length array — compiler extension)
	sint NperG = numTuples/numGPUs;   // tuples handled by each GPU
	refIdx_t maxRef = NperG;          // index of the all-max sentinel tuple
	for (int i=0; i<numGPUs; i++) gpus[i]->initMergeSortSmpl(numTuples);
	if (numGPUs > 1) {
		// NOTE(review): this branch hard-codes two GPUs (gpus[1],
		// gpuEnds[1] below) — confirm numGPUs is capped at 2.
#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) {
			// Initialize the reference array for the first coordinate
			gpus[gpuCnt]->initializeReferenceGPU(NperG, 0, dim);
			// And sort that array
			gpus[gpuCnt]->mergeSortRangeGPU(0, NperG, 0, dim, 0, dim);
		}
		sync();
		// Then swap data between the GPUs so all the data in gpu0 is less than
		// all the data in gpu1; performed only on gpu0.
		sint pivot = gpus[0]->balancedSwapGPU(0, NperG, dim, 0, dim, gpus[1]);
		cout << "Pivot = " << pivot << endl;
		sync();
		// Now merge the swapped data into a single sorted data set
#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			gpus[gpuCnt]->swapMergeGPU(0, NperG, dim, 0, gpuCnt==0 ? NperG-pivot: pivot, 0, dim);
		}
		sync(); // wait
		// And remove dups in all GPUs; each GPU also checks against the last
		// tuple on the previous GPU (getGPU(gpuCnt-1)).
#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			gpus[gpuCnt]->num = gpuEnds[gpuCnt][0] =
					gpus[gpuCnt]->removeDuplicatesGPU(0, NperG, 0, dim, 0, dim, getGPU(gpuCnt-1), NperG);
			// If some duplicates were removed, fill the empty locations with
			// references to the max (sentinel) tuple value.
			if (gpus[gpuCnt]->num != NperG)
				gpus[gpuCnt]->fillMemGPU(gpus[gpuCnt]->d_references[dim]+gpus[gpuCnt]->num, NperG, NperG-gpus[gpuCnt]->num);
			gpus[gpuCnt]->copyRefValGPU(0, NperG, dim, 0);
		}
		sync(); // wait
		// Get the median of the entire data set which is the last value on the lower GPU
		gpus[0]->setDevice();
		checkCudaErrors(cudaMemcpyAsync(&firstNode, gpus[0]->d_references[0]+gpus[0]->num-1, sizeof(refIdx_t), cudaMemcpyDeviceToHost, gpus[0]->stream));
		// Then replace that one with an index of the max node so that the sort size is the same but will remain in the same place.
		checkCudaErrors(cudaMemcpyAsync(gpus[0]->d_references[0]+gpus[0]->num-1, &maxRef, sizeof(refIdx_t), cudaMemcpyHostToDevice, gpus[0]->stream));
		// And subtract 1 from number of elements in gpu 0;
		gpus[0]->num -= 1;
		// On all gpus copy the sorted p=0 array to the other arrays and sort,
#pragma omp parallel for
		for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
			for (int p=1; p<dim; p++) {
				// Copy the references from p=0 to p=1..dim, sort and remove duplicates
				gpus[gpuCnt]->copyRefGPU(0, NperG, 0, p);
				gpus[gpuCnt]->mergeSortRangeGPU(0, NperG, p, dim, p, dim);
				gpuEnds[gpuCnt][p] = gpus[gpuCnt]->removeDuplicatesGPU(0, NperG, dim, p, p, dim);
			}
		}
		// Combine per-GPU counts; a negative count signals an error and is
		// propagated as -1.
		for (int i=0; i<dim; i++) {
			end[i] = gpuEnds[0][i]<0 || gpuEnds[1][i]<0 ? -1 : gpuEnds[0][i] + gpuEnds[1][i];
		}
	} else {
		// Single GPU: sort and dedup each axis independently.
		gpus[0]->setDevice();
		for (int p=0; p<dim; p++) {
			gpus[0]->initializeReferenceGPU(NperG, p, dim);
			gpus[0]->mergeSortRangeGPU(0, NperG, p, dim, p, dim);
			end[p] = gpus[0]->removeDuplicatesGPU(0, NperG, dim, p, p, dim);
		}
		gpus[0]->num = end[0];
	}
	sync(); // Make sure all GPUs are done before freeing memory.
	// Free the value arrays because they are not needed any more.
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
		gpus[gpuCnt]->closeMergeSortSmpl();
		for (int p=0; p<=dim; p++) {
			checkCudaErrors(cudaFree(gpus[gpuCnt]->d_values[p]));
		}
	}
}
/*
* buildKdTreeGPU is a Gpu class method that prepares the data for the gpu side partitioning and build tree
* and then calls those functions, once for each level. Note that all of the data should already be in the GPU
* after the mergeSortGPU function has been called. If any of those gpu data pointers are null, this function will
* either create them or error out.
* Inputs
* numTuples number of coordinates to build the tree on. all duplicates should be removed
* startP the first dimension to start the partitioning on.
* dim dimension of the coordinates
*/
refIdx_t Gpu::buildKdTreeGPU(const sint numTuples, const int startP, const sint dim) {
	// Build this GPU's kd-tree by partitioning level by level: floor(log2(n))
	// levels total, cycling the partition axis starting at startP, with a
	// special final-level pass (partitionDimLast).  The sorted reference
	// arrays from mergeSort must already be resident on the device.
	// Returns rootNode, the index of this GPU's subtree root.
	// NOTE(review): rootNode is presumably set as a side effect of
	// partitionDim/partitionDimLast — confirm against their definitions.
	setDevice();
	// Check to see if the GPU already has the references arrays and error out if not.
	// NOTE(review): if d_references is an array member this first test can
	// never be true; it only guards a pointer member — verify in Gpu.h.
	if (d_references == NULL) {
		cout << "buildKdTree Error: device does not have the reference arrays" << endl;
		exit(1);
	} else {
		for (sint i = 0; i < dim; i++)
			if (d_references[i] == NULL) {
				cout << "buildKdTree Error: device does not have the reference array " << i << endl;
				exit(1);
			}
	}
	if (d_references[dim] == NULL) { // If the last (scratch) array is not there, create it
		checkCudaErrors(cudaMalloc((void **) &d_references[dim], (numTuples)*sizeof(int)));
	}
	const sint tuplesDepth = int(floor(log2(float(numTuples))));
	// Partition each level, cycling the axis p through the dimensions.
	for (sint i=0; i<tuplesDepth-1; i++) {
		sint p = (i + startP) % dim;
		partitionDim( d_kdNodes, d_coord, d_references, p, dim, numTuples, i, numBlocks*numThreads);
	}
	sint p = (tuplesDepth + startP - 1) % dim;
	partitionDimLast( d_kdNodes, d_coord, d_references, p, dim, numTuples, tuplesDepth-1, numBlocks*numThreads);
	return rootNode;
}
/*
* buildKdTree is a static Gpu class method that starts the partitioning in one or two GPUs by calling buildTreeGPU
* Note that all of the data should already be in the GPU after the mergeSort function has been called.
* Inputs
* numTuples number of coordinates to build the tree on. all duplicates should be removed
* dim dimension of the coordinates
*
* Return index of the root node in the KdNodes array,.
*/
refIdx_t Gpu::buildKdTree(KdNode kdNodes[], const sint numTuples, const sint dim) {
	// Build the kd-tree on all GPUs in parallel and return the index of the
	// root node.  With two GPUs the per-GPU subtrees become the lt/gt
	// children of gpu1stNode, whose tuple is the median saved by mergeSort;
	// with one GPU the subtree root is the tree root.
	// (kdNodes and numTuples are currently unused here; each GPU builds from
	// its own gpus[i]->num count.)
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) gpus[gpuCnt]->initBuildKdTree();
	// Two-GPU builds start partitioning on axis 1 because the root (axis 0)
	// split was already performed by the balanced swap in mergeSort.
	int startP = numGPUs == 1 ? 0 : 1;
#pragma omp parallel for
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
		gpus[gpuCnt]->buildKdTreeGPU(gpus[gpuCnt]->num, startP, dim);
	}
	if (numGPUs==2) {
		// TODO read back the fist node data HERE
		gpu1stNode.ltChild = gpus[0]->rootNode;
		gpu1stNode.gtChild = gpus[1]->rootNode;
		gpu1stNode.tuple = firstNode;
	} else {
		firstNode = gpus[0]->rootNode;
	}
	for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++) gpus[gpuCnt]->closeBuildKdTree();
	return firstNode;
}
/*
* cuVerifyKdTree is a GPU kernel that is used to verify that one depth level the kdTree is correct. The refs array hold the indices of the
* KdNodes at a particular level of the tree. For each entry in refs, this kernel will examine the child nodes to make sure that
* the coordinate of the ltChild is less than coordinate of self and the coordinate of the gtChild is greater than itself. If this test
* fails, the negated index of self is written to the nextRefs array. In addition the global d_verifyKTdreeError be written with an
* error code so that the cpu function only has to check the one variable for pass or fail.
* At the first call to this kernel for level 0, refs contain a single index which is the root of the tree. If no error is found, the indices
* of the ltChild and gtChild are written to the nextRefs array. The nextRefs array will be used as the refs array on the next call to this
* kernel. The nextRefs array must be double the size of the refs array.
* Finally each block of threads of the kernel adds the number of good knNoes it found to the g_sum array.
* Inputs
* kdNodes[] Pointer to the KdNodes array
* coord[] Pointer to the coordinates array
* refs[] pointer to the array of indices of the KdNodes to be tested.
* num sint indicating the number of indices in the refs array to be tested
* p sint indicating the primary coordinate for this level
* dim sint indicating the number fo dimensions
* Outputs
* nextRefs Pointer to the array where the indices of the child nodes will be stored
* g_sum pointer to the array where the count of good nodes found by each block will be held
*/
// TODO use a negative value in g_sums[0] to indicate an error instead so a global is not needed.
// Global error flag set by cuVerifyKdTree when a child/parent ordering
// violation is found; the host clears it before each verification pass.
// TODO use a negative value in g_sums[0] to indicate an error instead so a global is not needed.
__device__ uint d_verifyKdTreeError;
// TODO create a __device__ function to handle the summation within a block.
__global__ void cuVerifyKdTree(const KdNode kdNodes[], const KdCoord coord[], sint g_sums[], refIdx_t nextRefs[], refIdx_t refs[], const sint num, const sint p, const sint dim) {
	const sint pos = threadIdx.x + blockIdx.x * blockDim.x;
	const sint tid = threadIdx.x;
	const sint numThreads = gridDim.x * blockDim.x;
	__shared__ sint s_sums[SHARED_SIZE_LIMIT];
	sint myCount = 0; // number of valid nodes this thread has seen
	refIdx_t node;
	for (sint i = pos; i<num; i+=numThreads) {
		node = refs[i];
		if (node > -1) { // Is there a node here?
			myCount++; // Count the node.
			refIdx_t child = kdNodes[node].gtChild; // Save off the gt node
			nextRefs[i*2+1] = child; // Put the child in the refs array for the next loop
			if (child != -1) { // Check for proper comparison
				sint cmp = cuSuperKeyCompare(coord+kdNodes[child].tuple*dim, coord+kdNodes[node].tuple*dim, p, dim);
				if (cmp <= 0) { // gtChild .le. self is an error so indicate that.
					//	nextRefs[i*2+1] = -node; // Overwrite the child with the error code
					d_verifyKdTreeError = 1; // and mark the error
				}
			}
			// now the less than side.
			child = kdNodes[node].ltChild;
			nextRefs[i*2] = child; // Put the child in the refs array for the next loop
			if (child != -1) {
				sint cmp = cuSuperKeyCompare(coord+kdNodes[child].tuple*dim, coord+kdNodes[node].tuple*dim, p, dim);
				if (cmp >= 0) { // ltChild .ge. self is an error so indicate that.
					//	nextRefs[i*2] = -node; // Overwrite the child with the error code
					d_verifyKdTreeError = 1;
				}
			}
		} else {
			nextRefs[i*2] = -1; // If there was no node here, make sure the next level knows that
			nextRefs[i*2+1] = -1;
		}
	}
	s_sums[tid] = myCount;
	// Now sum up the number of nodes found using the standard Cuda reduction code.
	__syncthreads();
	for (sint s=blockDim.x/2; s>32; s>>=1)
	{
		if (tid < s)
			s_sums[tid] = myCount = myCount + s_sums[tid + s];
		__syncthreads();
	}
	if (tid<32)
	{
		// Fetch final intermediate sum from 2nd warp
		if (blockDim.x >= 64) myCount += s_sums[tid + 32];
		// Reduce final warp using shuffle.
		// NOTE(review): __shfl_down is the legacy (pre-Volta) intrinsic,
		// removed for CC 7.0+; would need __shfl_down_sync with an explicit
		// mask on newer architectures — confirm the target compute capability.
		for (sint offset = warpSize/2; offset > 0; offset /= 2)
		{
			myCount += __shfl_down(myCount, offset);
		}
	}
	if (tid == 0)
		g_sums[blockIdx.x] += myCount; // and save off this block's sum.
}
/*
* blockReduce is a kernel which adds sums all of the values in the g_sum array. The final
* sum is returned in g_sum[0].
* Inputs:
* g_sum[] pointer to the array to be summed
* N sint indicating number of values to be summed
* Output
* g_sum[0] contains the final sum.
*/
__global__ void blockReduce(sint g_sums[], sint N) {
	// Sum the first N entries of g_sums; each block writes its partial sum
	// to g_sums[blockIdx.x], so a single-block launch leaves the final total
	// in g_sums[0] as documented above.
	const sint numThreads = gridDim.x * blockDim.x;
	const sint tid = threadIdx.x;
	__shared__ sint s_sums[SHARED_SIZE_LIMIT];
	sint mySum = 0;
	// Read in the data to be summed
	for (sint i = tid; (i)<N; i+=numThreads) {
		mySum += g_sums[i];
	}
	s_sums[tid] = mySum;
	// Now sum up the number of nodes found using the standard Cuda reduction code.
	__syncthreads();
	for (uint s=blockDim.x/2; s>32; s>>=1)
	{
		if (tid < s)
			s_sums[tid] = mySum = mySum + s_sums[tid + s];
		__syncthreads();
	}
	if (tid < 32)
	{
		// Fetch final intermediate sum from 2nd warp
		if (blockDim.x >= 64) mySum += s_sums[tid + 32];
		// Reduce final warp using shuffle.
		// NOTE(review): legacy __shfl_down — see cuVerifyKdTree; needs the
		// _sync variant on CC 7.0+.
		for (sint offset = warpSize/2; offset > 0; offset /= 2)
		{
			mySum += __shfl_down(mySum, offset);
		}
	}
	if (tid == 0)
		g_sums[blockIdx.x] = mySum; // Save off this blocks sum.
}
/*
* verifyKdTreeGPU is a Gpu class method that sets up the data for the cuVerifyKdTree kernel and
* calls it once for each level of the kdTree. After each call it checks to see if the GPU kernel
*/
// Allocate the per-GPU scratch buffers used while verifying the kd-tree:
// two ping-pong arrays of midpoint references and a zeroed partial-sum array.
void Gpu::initVerifyKdTree() {
#pragma omp critical (launchLock)
    {
        setDevice();
        // Ping-pong buffers for the per-level midpoint references.
        const size_t refBytes = 2 * num * sizeof(refIdx_t);
        for (int buf = 0; buf < 2; buf++) {
            checkCudaErrors(cudaMalloc((void **)&d_midRefs[buf], refBytes));
        }
        // Partial-sum array for counting nodes; must start out all zero.
        const size_t sumBytes = numBlocks * sizeof(uint);
        checkCudaErrors(cudaMalloc((void **)&d_sums, sumBytes));
        checkCudaErrors(cudaMemset(d_sums, 0, sumBytes));
    }
}
// Release the scratch buffers allocated by initVerifyKdTree().
// Waits for all outstanding work on this GPU before freeing.
void Gpu::closeVerifyKdTree() {
    syncGPU();
#pragma omp critical (launchLock)
    {
        setDevice();
        // Free both midpoint-reference ping-pong buffers, then the sum array.
        for (int buf = 0; buf < 2; buf++) {
            checkCudaErrors(cudaFree(d_midRefs[buf]));
        }
        checkCudaErrors(cudaFree(d_sums));
    }
}
/*
* verifyKdTreeGPU is a Gpu class method that sets up the data for the cuVerifyKdTree kernel and
* calls it once for each level of the kdTree. After each call it checks to see if the GPU kernel
* found an error and if so, some of the errors are printed and then the program exits.
* Inputs
* root index of the root node in the kdNodes array
* startP axis to start on. should always be less than dim
* dim number of dimensions
* numTuples number of KdNodes
*
* Return number of kdNodes found
*/
// Walks the kd-tree level by level on the GPU, launching cuVerifyKdTree once per
// level and checking the device-side error flag after each launch.  On error,
// prints up to ten offending nodes and returns -1; otherwise returns the total
// number of nodes counted by the per-block sums.
sint Gpu::verifyKdTreeGPU(const sint root, const sint startP, const sint dim, const sint numTuples) {
    setDevice();
    const sint logNumTuples = int(floor(log2(float(numTuples))));
    // Put the root node in the children array for level 0
    checkCudaErrors(cudaMemcpyAsync(d_midRefs[0], &root, sizeof(refIdx_t), cudaMemcpyHostToDevice, stream));
    refIdx_t* nextChildren; // where the next level's children will be written
    refIdx_t* children;     // where the current level's children are read from
    // Clear the error flag in the GPU
    sint verifyKdTreeError = 0;
    cudaMemcpyToSymbolAsync(d_verifyKdTreeError, &verifyKdTreeError,
                            sizeof(verifyKdTreeError),
                            0, cudaMemcpyHostToDevice, stream);
    // Loop through the levels, ping-ponging between the two midRefs buffers.
    for (sint level = 0; level < logNumTuples+1; level++) {
        const sint p = (level+startP) % dim; // Calculate the primary axis for this level
        nextChildren = d_midRefs[(level+1) % 2];
        children = d_midRefs[level % 2]; // (was "% 2 % 2"; the second modulo was redundant)
        // Check the current level and get the nodes for the next level.
        // Only start enough threads to cover the current level.
        sint threadsNeeded = 1<<level;
        sint blocks;
        // Calculate the right thread and block numbers
        if (threadsNeeded > numThreads){
            blocks = threadsNeeded/numThreads;
            if (blocks > numBlocks) blocks = numBlocks;
        } else {
            blocks = 1;
        }
#pragma omp critical (launchLock)
        {
            setDevice();
            cuVerifyKdTree<<<blocks,numThreads, 0, stream>>>(d_kdNodes,
                                                             d_coord,
                                                             d_sums,
                                                             nextChildren,
                                                             children,
                                                             (1<<level), p, dim);
            checkCudaErrors(cudaGetLastError());
        }
        // Check for error on the last run
        cudaMemcpyFromSymbolAsync(&verifyKdTreeError,
                                  d_verifyKdTreeError,
                                  sizeof(verifyKdTreeError),
                                  0, cudaMemcpyDeviceToHost, stream);
        syncGPU(); // Wait for the kernel and the flag copy to complete.
        if (verifyKdTreeError != 0){ // See if the kernel for this level found an error
            cout << "Verify Tree Error at level " << level << endl;
            // Here is where we get the data back from the GPU and find the node with the error
            refIdx_t* h_children = new refIdx_t[2<<level];
            checkCudaErrors(cudaMemcpyAsync(h_children, nextChildren, (2<<level)*sizeof(refIdx_t), cudaMemcpyDeviceToHost, stream));
            syncGPU(); // BUGFIX: the copy is asynchronous; wait before reading h_children.
            cout << "First 10 nodes in error are ";
            sint cnt = 0;
            sint all = 2 << level;
            for (sint i = 0; i < all; i++){
                if (h_children[i] < 0) // Is it a failure?
                    if (cnt++ < 10) // Only print the first ten failures.
                        cout << "[" << i << "]" << -h_children[i] << " ";
            }
            // BUGFIX: message previously printed "foundout of" (missing space).
            cout << endl << "Total of " << cnt << " bad nodes found out of " << all << endl;
            delete[] h_children; // BUGFIX: was leaked on the error path.
            return -1;
        }
    }
    // Finally, add the sums of all the blocks together and return the final count.
#pragma omp critical (launchLock)
    {
        setDevice();
        blockReduce<<<1,numThreads, 0, stream>>>(d_sums, numBlocks);
        checkCudaErrors(cudaGetLastError());
    }
    sint numNodes;
    checkCudaErrors(cudaMemcpyAsync(&numNodes, d_sums, sizeof(numNodes), cudaMemcpyDeviceToHost, stream));
    syncGPU(); // BUGFIX: synchronize before reading the async-copied result.
    return numNodes;
}
/*
* verifyKdTree is a static Gpu class method that set up and calls the verifyKdTreeGPU method
* on each GPU.
* Inputs
* kdNodes[] Pointer the the cpu copy of the kdNodes array. Currently unused
* root index of the root node in the kdNodes array
* dim number of dimensions
* numTuples number of KdNodes
*
* Return Total number of kdNodes found
*/
// Static entry point: verifies the kd-tree on every GPU and returns the total
// node count.  In the two-GPU case each GPU verifies one child subtree of the
// host-resident first node, so 1 is added for that node itself.
sint Gpu::verifyKdTree(KdNode kdNodes[], const sint root, const sint dim, const sint numTuples) {
    // Set up memory for the verify tree functions on every GPU.
    for (int g = 0; g < numGPUs; g++) gpus[g]->initVerifyKdTree();
    sint nodeCnts[numGPUs]; // per-GPU node counts
    if (numGPUs == 2) {
        // Verify the two subtrees in parallel, one per GPU.
#pragma omp parallel for
        for (int g = 0; g < numGPUs; g++){
            const refIdx_t subRoot = (g == 0) ? gpu1stNode.ltChild : gpu1stNode.gtChild;
            nodeCnts[g] = gpus[g]->verifyKdTreeGPU(subRoot, 1, dim, gpus[g]->num);
        }
    } else {
        nodeCnts[0] = gpus[0]->verifyKdTreeGPU(root, 0, dim, numTuples);
    }
    // Start at 1 in the two-GPU case to account for the first node on the host.
    int nodeCnt = (numGPUs == 2) ? 1 : 0;
    for (int g = 0; g < numGPUs; g++) nodeCnt += nodeCnts[g];
    // Free the memory used for verifying the tree.
    for (int g = 0; g < numGPUs; g++) gpus[g]->closeVerifyKdTree();
    return nodeCnt;
}
/*
* getKdNodesFromGPU is a Gpu class method that copies the GPU version of the KdNodes array to the host.
* Inputs
* numTuples size of kdNodes array to copy
* Output
* kdNodes[] pointer to the place where the kdNodes will be copied
*/
// Copies numTuples KdNodes from this GPU back to the host array kdNodes.
// Exits with a diagnostic if either the destination or the device copy is missing.
void Gpu::getKdNodesFromGPU(KdNode kdNodes[], const sint numTuples){
    setDevice();
    // Guard: both endpoints of the copy must exist.
    if (kdNodes == NULL || d_kdNodes == NULL) {
        if (kdNodes == NULL)
            cout << "getKdNodesFromGPU Error: Don't know where to put the kdNodes" << endl;
        if (d_kdNodes == NULL)
            cout << "getKdNodesFromGPU Error: GPU copy of kdNodes is not available" << endl;
        exit(1);
    }
    // Copy the kdNodes array back asynchronously on this GPU's stream.
    checkCudaErrors(cudaMemcpyAsync(kdNodes, d_kdNodes, (numTuples)*sizeof(KdNode), cudaMemcpyDeviceToHost, stream));
}
/*
* getKdTreeResults is a static Gpu class method that copies the KdNoes data and coordinate data
* from all GPUs to a local copy. Data from each GPU is concatenated into a single array so in
* the two GPU case, the returned indices need to be fixed.
* Inputs
* numTuples size of kdNodes array to copy
* Output
* kdNodes[] Host KdNodes array where data from the GPU should be put
* coord[] Host coordinate array where data from the GPU should be put
* * This is only used for the 2 GPU case where coordinate data
* may get reordered
*/
// Gathers KdNode (and, in the two-GPU case, coordinate) data from all GPUs into
// the host arrays.  Each GPU's data is concatenated, so for two GPUs the child,
// tuple, and first-node references in the upper half must be offset by numPerGPU.
void Gpu::getKdTreeResults(KdNode kdNodes[], KdCoord coord[], const sint numTuples, const sint dim) {
    // Copy the knNodes array back
    int numPerGPU = numTuples/numGPUs;
    if (kdNodes != NULL ){
        // Pull each GPU's slice into its portion of the concatenated host array.
#pragma omp parallel for
        for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
            gpus[gpuCnt]->getKdNodesFromGPU(kdNodes + gpuCnt*numPerGPU, numPerGPU);
        }
        if (numGPUs == 2) { // Fix the ref indices for the upper part of the array.
            // copy the GPU first node to the host; its gt child lives in GPU 1's
            // slice so it gets the numPerGPU offset.
            kdNodes[firstNode].ltChild = gpu1stNode.ltChild;
            kdNodes[firstNode].gtChild = gpu1stNode.gtChild + numPerGPU;
            kdNodes[firstNode].tuple = gpu1stNode.tuple;
            // Shift every reference in GPU 1's half; negative values are left
            // alone (they appear to be used as flags — TODO confirm).
#pragma omp parallel for
            for (int i = numPerGPU; i<numTuples; i++) {
                if (kdNodes[i].ltChild >= 0) kdNodes[i].ltChild += numPerGPU;
                if (kdNodes[i].gtChild >= 0) kdNodes[i].gtChild += numPerGPU;
                if (kdNodes[i].tuple >= 0) kdNodes[i].tuple += numPerGPU;
            }
            if (coord != NULL ){ // If there are 2 GPUs, the coordinates need to be copied back
#pragma omp parallel for
                for (int gpuCnt = 0; gpuCnt<numGPUs; gpuCnt++){
                    gpus[gpuCnt]->getCoordinatesFromGPU(coord + gpuCnt*numPerGPU*dim, numPerGPU, dim);
                }
            } else {
                // Coordinates are mandatory in the two-GPU case because they may
                // have been reordered across devices.
                cout << "getKdTreeResults Error: Don't know where to put the coordinates" << endl;
                exit(1);
            }
        }
    } else {
        cout << "getKdTreeResults Error: Don't know where to put the kdNodes" << endl;
        exit(1);
    }
}
// Reports whether a device can participate in peer-to-peer transfers.
// On Windows (WDDM) this requires the TCC driver; elsewhere any board of
// compute capability 2.0 (Fermi) or newer qualifies.
inline bool IsGPUCapableP2P(cudaDeviceProp *pProp)
{
#ifdef _WIN32
    return pProp->tccDriver != 0;
#else
    return pProp->major >= 2;
#endif
}
/*
* gpuSetup is a static class method that determines what GPUs are available and whether or
* not they are capable of UVM which is required for multi-GPU sort. It then creates an
* instance of the GPU class for each GPU to be used.
* Inputs
* gpu_max Maximum nmber of GPUs to apply. (cannot be greater than 2 for this version)
* threads Maximum number of threads to use on partitioning
* blocks Macimum number of blocks to use on partitioning
*/
// Discovers the available GPUs, checks up to the first two P2P-capable devices
// for mutual peer access and UVA, enables peer access when possible, and then
// constructs one Gpu instance per usable device.  Falls back to a single GPU
// whenever the multi-GPU requirements are not met.
void Gpu::gpuSetup(int gpu_max, int threads, int blocks, int dim){
    // Number of GPUs
    printf("Checking for multiple GPUs...\n");
    sint gpu_n;
    checkCudaErrors(cudaGetDeviceCount(&gpu_n));
    printf("CUDA-capable device count: %i\n", gpu_n);
    gpu_n = min(gpu_n,gpu_max);
    // Query device properties
    cudaDeviceProp prop[MAX_GPUS];
    int gpuid[MAX_GPUS]; // We want to find the first two GPU's that can support P2P
    // NOTE(review): if no device has major >= 2, gpuid stays uninitialized but
    // gpuid[0] is still read when constructing the Gpu objects below — confirm
    // this configuration cannot occur on supported hardware.
    int gpu_count = 0; // GPUs that meet the criteria
    for (int i=0; i < gpu_n; i++)
    {
        checkCudaErrors(cudaGetDeviceProperties(&prop[i], i));
        // Only boards based on Fermi can support P2P
        if ((prop[i].major >= 2))
        {
            // This is an array of P2P capable GPUs
            gpuid[gpu_count++] = i;
        }
        printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
    }
    if (gpu_count >= 2) {
        // Check possibility for peer access
        printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
        int can_access_peer_0_1, can_access_peer_1_0;
        // In this case we just pick the first two that we can support
        checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_0_1, gpuid[0], gpuid[1]));
        checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_1_0, gpuid[1], gpuid[0]));
        // Output results from P2P capabilities
        printf("> Peer-to-Peer (P2P) access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[0]].name, gpuid[0],
               prop[gpuid[1]].name, gpuid[1] ,
               can_access_peer_0_1 ? "Yes" : "No");
        printf("> Peer-to-Peer (P2P) access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[1]].name, gpuid[1],
               prop[gpuid[0]].name, gpuid[0],
               can_access_peer_1_0 ? "Yes" : "No");
        if (can_access_peer_0_1 == 0 || can_access_peer_1_0 == 0)
        {
            // Peer access must work in both directions; otherwise use one GPU.
            printf("Peer to Peer access is not available between GPU%d <-> GPU%d, waiving test.\n", gpuid[0], gpuid[1]);
            checkCudaErrors(cudaSetDevice(gpuid[0]));
            gpu_n = 1;
        } else {
            // Enable peer access
            printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]);
            checkCudaErrors(cudaSetDevice(gpuid[0]));
            checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[1], 0));
            checkCudaErrors(cudaSetDevice(gpuid[1]));
            checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[0], 0));
            // Check that we got UVA on both devices
            printf("Checking GPU%d and GPU%d for UVA capabilities...\n", gpuid[0], gpuid[1]);
            const bool has_uva = (prop[gpuid[0]].unifiedAddressing && prop[gpuid[1]].unifiedAddressing);
            printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid[0]].name, gpuid[0], (prop[gpuid[0]].unifiedAddressing ? "Yes" : "No"));
            printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid[1]].name, gpuid[1], (prop[gpuid[1]].unifiedAddressing ? "Yes" : "No"));
            if (has_uva)
            {
                printf("Both GPUs can support UVA, enabling...\n");
            }
            else
            {
                // Multi-GPU sort needs UVA; drop back to one GPU without it.
                printf("At least one of the two GPUs does NOT support UVA.\n");
                gpu_n = 1;
            }
        }
    } else {
        gpu_n = 1;
    }
#ifdef FAKE_TWO
    // Debug mode: pretend there are two GPUs, both backed by device gpuid[0].
    cout << "Faking 2 GPUs." << endl;
    gpu_n = setNumGPUs(2);
    for (int i = 0; i<gpu_n; i++){
        gpus[i] = new Gpu(threads, blocks, gpuid[0], dim);
    }
#else
    gpu_n = setNumGPUs(gpu_n);
    for (int i = 0; i<gpu_n; i++){
        gpus[i] = new Gpu(threads, blocks, gpuid[i], dim);
    }
#endif
}
|
4d6cc52c395f4e083109f9e552334da23a7ee84f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/transpose.h>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
// Parameters for one transpose test case.
template <typename T>
struct TranposeInputs {
    T tolerance;                 // max absolute per-element difference accepted
    int len;                     // total element count; SetUp() asserts len == 9
    int n_row;                   // rows of the input matrix
    int n_col;                   // columns of the input matrix
    unsigned long long int seed; // not referenced by the visible test code — TODO confirm
};
// Stream printer required by gtest's parameterized-test machinery; deliberately
// prints nothing for these inputs.
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const TranposeInputs<T> &dims) {
    return os;
}
// Fixture: builds a fixed 3x3 matrix on the device and exercises both the
// out-of-place transpose (into data_trans) and the in-place transpose (of data),
// so the TEST_P bodies can compare each against data_trans_ref.
template <typename T>
class TransposeTest : public ::testing::TestWithParam<TranposeInputs<T>> {
 protected:
  void SetUp() override {
    CUBLAS_CHECK(hipblasCreate(&handle));
    CUDA_CHECK(hipStreamCreate(&stream));
    params = ::testing::TestWithParam<TranposeInputs<T>>::GetParam();
    int len = params.len;
    allocate(data, len);
    // The expected values below are hard-coded for a 3x3 matrix.
    ASSERT(params.len == 9, "This test works only with len=9!");
    T data_h[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    updateDevice(data, data_h, len, stream);
    allocate(data_trans_ref, len);
    // Expected transpose of data_h.
    T data_ref_h[] = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0};
    updateDevice(data_trans_ref, data_ref_h, len, stream);
    allocate(data_trans, len);
    // Out-of-place transpose, then the in-place overload on the same input.
    transpose(data, data_trans, params.n_row, params.n_col, handle, stream);
    transpose(data, params.n_row, stream);
  }
  void TearDown() override {
    CUDA_CHECK(hipFree(data));
    CUDA_CHECK(hipFree(data_trans));
    CUDA_CHECK(hipFree(data_trans_ref));
    CUBLAS_CHECK(hipblasDestroy(handle));
    CUDA_CHECK(hipStreamDestroy(stream));
  }
 protected:
  TranposeInputs<T> params;          // current test parameters
  T *data, *data_trans, *data_trans_ref; // device buffers: input, output, expected
  hipblasHandle_t handle;
  hipStream_t stream;
};
// 3x3 test cases for float and double instantiations.
const std::vector<TranposeInputs<float>> inputsf2 = {
    {0.1f, 3 * 3, 3, 3, 1234ULL}};
const std::vector<TranposeInputs<double>> inputsd2 = {
    {0.1, 3 * 3, 3, 3, 1234ULL}};
typedef TransposeTest<float> TransposeTestValF;
TEST_P(TransposeTestValF, Result) {
  // Out-of-place result must match the reference ...
  ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
                          CompareApproxAbs<float>(params.tolerance)));
  // ... and so must the in-place transpose of the original buffer.
  ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
                          CompareApproxAbs<float>(params.tolerance)));
}
typedef TransposeTest<double> TransposeTestValD;
TEST_P(TransposeTestValD, Result) {
  // Same two checks for the double instantiation.
  ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
                          CompareApproxAbs<double>(params.tolerance)));
  ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
                          CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValD,
                        ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
| 4d6cc52c395f4e083109f9e552334da23a7ee84f.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/transpose.h>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
// Parameters for one transpose test case.
template <typename T>
struct TranposeInputs {
    T tolerance;                 // max absolute per-element difference accepted
    int len;                     // total element count; SetUp() asserts len == 9
    int n_row;                   // rows of the input matrix
    int n_col;                   // columns of the input matrix
    unsigned long long int seed; // not referenced by the visible test code — TODO confirm
};
// Stream printer required by gtest's parameterized-test machinery; deliberately
// prints nothing for these inputs.
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const TranposeInputs<T> &dims) {
    return os;
}
// Fixture: builds a fixed 3x3 matrix on the device and exercises both the
// out-of-place transpose (into data_trans) and the in-place transpose (of data),
// so the TEST_P bodies can compare each against data_trans_ref.
template <typename T>
class TransposeTest : public ::testing::TestWithParam<TranposeInputs<T>> {
 protected:
  void SetUp() override {
    CUBLAS_CHECK(cublasCreate(&handle));
    CUDA_CHECK(cudaStreamCreate(&stream));
    params = ::testing::TestWithParam<TranposeInputs<T>>::GetParam();
    int len = params.len;
    allocate(data, len);
    // The expected values below are hard-coded for a 3x3 matrix.
    ASSERT(params.len == 9, "This test works only with len=9!");
    T data_h[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    updateDevice(data, data_h, len, stream);
    allocate(data_trans_ref, len);
    // Expected transpose of data_h.
    T data_ref_h[] = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0};
    updateDevice(data_trans_ref, data_ref_h, len, stream);
    allocate(data_trans, len);
    // Out-of-place transpose, then the in-place overload on the same input.
    transpose(data, data_trans, params.n_row, params.n_col, handle, stream);
    transpose(data, params.n_row, stream);
  }
  void TearDown() override {
    CUDA_CHECK(cudaFree(data));
    CUDA_CHECK(cudaFree(data_trans));
    CUDA_CHECK(cudaFree(data_trans_ref));
    CUBLAS_CHECK(cublasDestroy(handle));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }
 protected:
  TranposeInputs<T> params;          // current test parameters
  T *data, *data_trans, *data_trans_ref; // device buffers: input, output, expected
  cublasHandle_t handle;
  cudaStream_t stream;
};
// 3x3 test cases for float and double instantiations.
const std::vector<TranposeInputs<float>> inputsf2 = {
    {0.1f, 3 * 3, 3, 3, 1234ULL}};
const std::vector<TranposeInputs<double>> inputsd2 = {
    {0.1, 3 * 3, 3, 3, 1234ULL}};
typedef TransposeTest<float> TransposeTestValF;
TEST_P(TransposeTestValF, Result) {
  // Out-of-place result must match the reference ...
  ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
                          CompareApproxAbs<float>(params.tolerance)));
  // ... and so must the in-place transpose of the original buffer.
  ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
                          CompareApproxAbs<float>(params.tolerance)));
}
typedef TransposeTest<double> TransposeTestValD;
TEST_P(TransposeTestValD, Result) {
  // Same two checks for the double instantiation.
  ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
                          CompareApproxAbs<double>(params.tolerance)));
  ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
                          CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValD,
                        ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
037bb7301918feb5c7a626e6ed4e058143423a40.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B.
 * wA is A's width and wB is B's width.
 * Preconditions (implied by the unguarded tile loads below):
 *   - blockDim == (BLOCK_SIZE, BLOCK_SIZE)
 *   - wA, wB, and A's height are multiples of BLOCK_SIZE
 */
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd   = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep  = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep  = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep)
    {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
// Fill the first `size` elements of `data` with the constant `val`.
void constantInit(float *data, int size, float val)
{
    for (float *p = data, *end = data + size; p != end; ++p)
    {
        *p = val;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
// Host driver: allocates A, B, C, runs a warmup launch, times nIter launches of
// matrixMulCUDA, verifies the result against the closed-form value (every row of
// A is 1.0, every element of B is valB, so every C element should be wA * valB),
// and returns EXIT_SUCCESS/EXIT_FAILURE accordingly.
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    // NOTE(review): these mallocs are unchecked, unlike h_C below — TODO confirm.
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    if (h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }
    hipError_t error;
    error = hipMalloc((void **) &d_A, mem_size_A);
    if (error != hipSuccess)
    {
        printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = hipMalloc((void **) &d_B, mem_size_B);
    if (error != hipSuccess)
    {
        printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = hipMalloc((void **) &d_C, mem_size_C);
    if (error != hipSuccess)
    {
        printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
    if (error != hipSuccess)
    {
        printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
    if (error != hipSuccess)
    {
        printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Setup execution parameters
    dim3 threads(block_size, block_size);
    // NOTE(review): gridx has no y dimension, so the block_size==16 warmup below
    // covers only one row of C tiles — confirm this is intentional.
    dim3 gridx(dimsB.x / threads.x);
    // NOTE(review): grid.z == 2 makes every (x,y) tile run twice; blockIdx.z is
    // never read by the kernel, so the duplicate blocks redundantly recompute and
    // rewrite the same C elements — confirm this is a deliberate experiment.
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y, 2);
    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    // NOTE(review): no hipGetLastError() after any launch in this function, so a
    // bad launch configuration would go unreported.
    if (block_size == 16)
    {
        hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(gridx), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    else
    {
        hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    printf("done\n");
    // Make sure the warmup finished before starting the timed runs.
    hipDeviceSynchronize();
    // Allocate CUDA events that we'll use for timing
    hipEvent_t start;
    error = hipEventCreate(&start);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    hipEvent_t stop;
    error = hipEventCreate(&stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event
    error = hipEventRecord(start, NULL);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Execute the kernel
    int nIter = 300;
    for (int j = 0; j < nIter; j++)
    {
        if (block_size == 16)
        {
            hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        else
        {
            hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
    }
    // Record the stop event
    error = hipEventRecord(stop, NULL);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = hipEventSynchronize(stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = hipEventElapsedTime(&msecTotal, start, stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
    if (error != hipSuccess)
    {
        printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    printf("Checking computed result for correctness: ");
    bool correct = true;
    // test relative error by the formula
    //     |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|>  < eps
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/dot_length ;
        if (rel_err > eps)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
    // Clean up memory
    // NOTE(review): the start/stop events are never destroyed.
    free(h_A);
    free(h_B);
    free(h_C);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
| 037bb7301918feb5c7a626e6ed4e058143423a40.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B.
 * wA is A's width and wB is B's width.
 * Preconditions (implied by the unguarded tile loads below):
 *   - blockDim == (BLOCK_SIZE, BLOCK_SIZE)
 *   - wA, wB, and A's height are multiples of BLOCK_SIZE
 */
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd   = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep  = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep  = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep)
    {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
// Fill the first `size` elements of `data` with the constant `val`.
void constantInit(float *data, int size, float val)
{
    for (float *p = data, *end = data + size; p != end; ++p)
    {
        *p = val;
    }
}
/**
 * Run a simple test of matrix multiplication using CUDA.
 *
 * Allocates host/device buffers for A (dimsA) and B (dimsB), fills A with
 * 1.0f and B with valB, times nIter launches of matrixMulCUDA with CUDA
 * events, and verifies every element of C against the analytic result
 * dimsA.x * valB.
 *
 * Preconditions: dimsA.x, dimsB.x and dimsA.y must be multiples of
 * block_size — the grid is computed by integer division with no remainder
 * handling.
 *
 * Returns EXIT_SUCCESS when the result check passes, EXIT_FAILURE otherwise
 * (allocation/copy failures terminate the process directly).
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    (void)argc;  // unused; kept so the public signature stays unchanged
    (void)argv;
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    if (h_A == NULL || h_B == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices A/B!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize host memory: with A == 1 everywhere and B == valB everywhere,
    // every element of C = A*B equals dimsA.x * valB, which makes verification trivial.
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    // Allocate host matrix C (dimsB.x columns x dimsA.y rows)
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    if (h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }
    cudaError_t error;
    error = cudaMalloc((void **) &d_A, mem_size_A);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_B, mem_size_B);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_C, mem_size_C);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Setup execution parameters.
    // FIX: the grid must be exactly the 2-D tiling of C.
    //  - The previous code launched with gridDim.z == 2, so every tile was
    //    computed twice: the duplicate blocks write identical values into C
    //    (benign for the result) but the timed loop did double the work while
    //    flopsPerMatrixMul below counts it once, halving the reported GFLOP/s.
    //  - The previous 16x16 warmup used a 1-D grid (gridx) that covered only
    //    the first row of tiles.
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    if (block_size == 16)
    {
        matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    else
    {
        matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    printf("done\n");
    cudaDeviceSynchronize();
    // Surface launch-configuration errors from the warmup before timing.
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch warmup kernel (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event
    error = cudaEventRecord(start, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Execute the kernel nIter times; the events bracket all launches.
    int nIter = 300;
    for (int j = 0; j < nIter; j++)
    {
        if (block_size == 16)
        {
            matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        else
        {
            matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
    }
    // Record the stop event
    error = cudaEventRecord(stop, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Compute and print the performance (2 flops per multiply-add)
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }
    printf("Checking computed result for correctness: ");
    bool correct = true;
    // test relative error by the formula
    //     |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|>  < eps
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/dot_length ;
        if (rel_err > eps)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
|
efe7e6cf87ccbf6bd8e08afb44cf860a2a3a222c.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <cfloat>
#include <iostream>
#include "common.hpp"
using std::cout;
using std::endl;
#define BLOCKSIZE 1024
namespace ohem_space {
// In-place tree reduction (sum) over one block-row's shared-memory slice:
// on return, sdata[0] holds the sum of sdata[0..blocksize-1].
// Must be called by all threads of the block (contains __syncthreads()),
// and `tid` must be this thread's index within the slice.
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t *sdata, int blocksize, int tid) {
    __syncthreads();
    // NOTE: block size should be 2 ** x
    // (halving stride only covers every element when blocksize is a power of two)
    for (int s{blocksize / 2}; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();  // barrier is outside the `if`, so no divergent sync
    }
}
// In-place tree reduction (max) over one block-row's shared-memory slice:
// on return, sdata[0] holds the maximum of sdata[0..blocksize-1].
// Same contract as reduce_sum: all block threads must reach this call and
// blocksize must be a power of two.
template<typename scalar_t>
__forceinline__ __device__ void reduce_max(scalar_t* sdata, int blocksize, int tid) {
    __syncthreads();
    for (int s{blocksize / 2}; s > 0; s >>= 1) {
        if (tid < s) {
            if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
        }
        __syncthreads();  // barrier reached by every thread each iteration
    }
}
}
// kernel functions
// Per-sample ground-truth softmax probability, cooperative variant.
// Logits are laid out (n_size, dimsize, m_size); each "sample" is one (n, m)
// position with dimsize class scores. A row of blockDim.x threads (grouped by
// threadIdx.y) cooperates on one sample, using shared memory for the
// max / exp-sum reductions — intended for large dimsize.
// Outputs: scores[i] = softmax prob of labels[i] (or 1.0 for ignored samples,
// so they sort to the end of an ascending sort), indices[i] = i.
// Launch contract: dynamic shared memory of blockDim.x * blockDim.y *
// sizeof(scalar_t) bytes; blockDim.x must be a power of two (reductions).
template<typename scalar_t>
__global__ void OHEMGetScores(const int n_size,
                              const int dimsize, const int m_size,
                              const scalar_t *logits,
                              scalar_t *scores,
                              const int64_t *labels,
                              int *indices,
                              const int64_t ignore_index) {
    // shared memory: one slice of blockDim.x values per block-row (threadIdx.y)
    extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
    scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
    int sample_offset = gridDim.x * blockDim.y;  // samples processed per grid step
    sdata = sdata + blockDim.x * threadIdx.y;
    int tid = threadIdx.x;
    int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
    int samplesize = n_size * m_size;
    for (int i{sample_id}; i < samplesize; i += sample_offset) {
        indices[i] = i;
        int n_idx = i / m_size;
        int m_idx = i % m_size;
        int64_t lb = labels[i];
        // lb depends only on i, so the whole block-row takes this branch
        // uniformly; the early `continue` does not split the reductions.
        if (lb == ignore_index) {
            if (tid == 0) scores[i] = scalar_t(1.);
            continue;
        }
        // obtain max over the dimsize class logits (numerical stability)
        sdata[tid] = scalar_t(-10000.);
        __syncthreads();
        for (int j{tid}; j < dimsize; j += blockDim.x) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            scalar_t val = logits[idx];
            if (val > sdata[tid]) sdata[tid] = val;
        }
        __syncthreads();
        ohem_space::reduce_max<scalar_t>(sdata, blockDim.x, tid);
        scalar_t max_val = sdata[0];
        // obtain exp sum (softmax denominator)
        // NOTE(review): expf computes in float even when scalar_t is double —
        // confirm the precision loss is acceptable.
        sdata[tid] = 0.;
        __syncthreads();
        for (int j{tid}; j < dimsize; j += blockDim.x) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            sdata[tid] += expf(logits[idx] - max_val);
        }
        __syncthreads();
        ohem_space::reduce_sum<scalar_t>(sdata, blockDim.x, tid);
        if (tid == 0) {
            // softmax probability of the ground-truth class
            int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
            scores[i] = expf(logits[idx] - max_val) / sdata[0];
        }
    }
}
// Per-sample ground-truth softmax probability, one-thread-per-sample variant.
// Same inputs/outputs as OHEMGetScores, but each thread serially scans the
// dimsize class logits of its sample (grid-stride loop over samples) —
// chosen by the host wrapper when dimsize is small.
template<typename scalar_t>
__global__ void OHEMGetScoresSpatial(const int n_size,
                                     const int dimsize, const int m_size,
                                     const scalar_t *logits,
                                     scalar_t *scores,
                                     const int64_t *labels,
                                     int *indices,
                                     const int64_t ignore_index) {
    int sample_offset = gridDim.x * blockDim.x;  // grid-stride step
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int samplesize = n_size * m_size;
    for (int i{tid}; i < samplesize; i += sample_offset) {
        indices[i] = i;
        int n_idx = i / m_size;
        int m_idx = i % m_size;
        int lb = static_cast<int>(labels[i]);
        if (lb == ignore_index) {
            // score 1.0 puts ignored samples at the end of the ascending sort
            scores[i] = scalar_t(1.);
            continue;
        }
        // obtain max (for numerically stable softmax)
        scalar_t max_val = scalar_t(-10000.);
        for (int j{0}; j < dimsize; ++j) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            scalar_t val = logits[idx];
            if (val > max_val) max_val = val;
        }
        // obtain sum exp (softmax denominator)
        scalar_t sum_exp = scalar_t(0.);
        for (int j{0}; j < dimsize; ++j) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            sum_exp += expf(logits[idx] - max_val);
        }
        // probability of the ground-truth class
        int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
        scores[i] = expf(logits[idx] - max_val) / sum_exp;
    }
}
// Given `scores` sorted ascending with `idx` carrying each score's original
// sample position, mark every sample past the first n_min whose score exceeds
// score_thresh as ignored. The n_min lowest-score (hardest) samples are
// always kept because the loop starts at n_min.
template<typename scalar_t>
__global__ void OHEMSetLabels(const int samplesize,
                              const int *idx,
                              const scalar_t *scores,
                              int64_t *ohem_label,
                              const int64_t ignore_index,
                              const float score_thresh,
                              const int64_t n_min) {
    int sample_offset = gridDim.x * blockDim.x;  // grid-stride step
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i{static_cast<int>(n_min) + tid}; i < samplesize; i += sample_offset) {
        // idx[i] maps the sorted position back to the original sample index
        if (scores[i] > score_thresh) ohem_label[idx[i]] = ignore_index;
    }
}
// cuda functions
// Core OHEM routine (HIP path). logits: (n, c, *) class scores;
// labels: (n, *) int64 targets. Returns a copy of `labels` in which every
// sample NOT selected by OHEM is overwritten with `ignore_index`: the n_min
// samples with the lowest ground-truth softmax probability are always kept,
// plus any further sample whose probability is <= score_thresh.
at::Tensor Score_ohem_label_cuda(const at::Tensor &logits,
                                 const at::Tensor &labels,
                                 const int64_t ignore_index,
                                 const float score_thresh,
                                 const int64_t n_min) {
    // CHECK type and shape
    AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
    AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
    const int n_size = logits.size(0);
    const int dimsize = logits.size(1);
    const int m_size = logits.numel() / (n_size * dimsize);
    const int samplesize = labels.numel();
    // keeping at least n_min samples would keep everything: nothing to do
    if (n_min >= samplesize) return labels;
    // allocate memory and cuda grid/block
    auto ohem_label = labels.clone();
    auto scores = torch::empty_like(labels, logits.options());
    thrust::device_vector<int> idx(samplesize);
    if (ohem_label.numel() == 0) {
        AT_CUDA_CHECK(hipGetLastError());
        return ohem_label;
    }
    // call kernel: one-thread-per-sample variant for small class counts and
    // large sample counts, cooperative shared-memory variant otherwise
    if (dimsize < 32 && samplesize > (4 * 1024)) {
        int gridx = ::min((int)4096, int(samplesize / BLOCKSIZE));
        gridx = ::max((int)1, gridx);
        dim3 block1(BLOCKSIZE);
        dim3 grid1(gridx);
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
            hipLaunchKernelGGL(( OHEMGetScoresSpatial<scalar_t>), dim3(grid1), dim3(block1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                n_size, dimsize, m_size,
                logits.contiguous().data_ptr<scalar_t>(),
                scores.contiguous().data_ptr<scalar_t>(),
                labels.contiguous().data_ptr<int64_t>(),
                thrust::raw_pointer_cast(&idx[0]),
                ignore_index
            );
        });
    } else {
        // blockx: smallest power of two covering dimsize, halved, then clamped
        // to [32, BLOCKSIZE] — reductions require a power-of-two row width
        int blockx = 32;
        while (blockx < dimsize) blockx *= 2;
        blockx = ::max(::min((int)BLOCKSIZE, blockx / 2), (int)32);
        int blocky = ::min(samplesize, (int)(BLOCKSIZE / blockx));
        blocky = ::max((int)1, blocky);
        int gridx = ::min(4096, (int)(samplesize / blocky));
        gridx = ::max((int)1, gridx);
        int n_shm = blockx * blocky;  // one shared slice per block-row
        dim3 block1(blockx, blocky);
        dim3 grid1(gridx);
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
            int shm_size = n_shm * sizeof(scalar_t);
            hipLaunchKernelGGL(( OHEMGetScores<scalar_t>), dim3(grid1), dim3(block1), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                n_size, dimsize, m_size,
                logits.contiguous().data_ptr<scalar_t>(),
                scores.contiguous().data_ptr<scalar_t>(),
                labels.contiguous().data_ptr<int64_t>(),
                thrust::raw_pointer_cast(&idx[0]),
                ignore_index
            );
        });
    }
    int grid2_num = ::min(4096, (int)(samplesize / BLOCKSIZE));
    grid2_num = ::max((int)1, grid2_num);
    dim3 block2(BLOCKSIZE);
    dim3 grid2(grid2_num);
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
        // ascending sort (thrust default comparator), carrying original sample
        // positions in idx so OHEMSetLabels can write back to the right slots.
        // NOTE(review): for Half tensors this relies on at::Half's operator< in
        // device code — confirm it behaves as intended.
        thrust::sort_by_key(
            thrust::device,
            scores.contiguous().data_ptr<scalar_t>(),
            scores.contiguous().data_ptr<scalar_t>() + samplesize,
            &idx[0]
        );
        hipLaunchKernelGGL(( OHEMSetLabels<scalar_t>), dim3(grid2), dim3(block2), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            samplesize, thrust::raw_pointer_cast(&idx[0]),
            scores.contiguous().data_ptr<scalar_t>(),
            ohem_label.contiguous().data_ptr<int64_t>(),
            ignore_index, score_thresh, n_min
        );
    });
    AT_CUDA_CHECK(hipGetLastError());
    return ohem_label;
}
// python interface
// Python-facing entry point: validate that both tensors live on the GPU,
// pin the current device to that of `logits`, then dispatch to the CUDA
// implementation.
at::Tensor Score_ohem_label(const at::Tensor &logits,
                            const at::Tensor &labels,
                            const int64_t ignore_index,
                            const float score_thresh,
                            const int64_t n_min) {
    const bool logits_on_gpu = logits.device().type() == c10::kCUDA;
    const bool labels_on_gpu = labels.device().type() == c10::kCUDA;
    if (!(logits_on_gpu && labels_on_gpu)) {
        AT_ERROR("this ohem method only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return Score_ohem_label_cuda(logits, labels, ignore_index, score_thresh, n_min);
}
// Python binding: exposes the OHEM routine to Python as `score_ohem_label`.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("score_ohem_label", &Score_ohem_label, "ohem by score on label");
}
| efe7e6cf87ccbf6bd8e08afb44cf860a2a3a222c.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <cfloat>
#include <iostream>
#include "common.hpp"
using std::cout;
using std::endl;
#define BLOCKSIZE 1024
namespace ohem_space {
// In-place tree reduction (sum) over one block-row's shared-memory slice:
// on return, sdata[0] holds the sum of sdata[0..blocksize-1].
// Must be called by all threads of the block (contains __syncthreads()),
// and `tid` must be this thread's index within the slice.
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t *sdata, int blocksize, int tid) {
    __syncthreads();
    // NOTE: block size should be 2 ** x
    // (halving stride only covers every element when blocksize is a power of two)
    for (int s{blocksize / 2}; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();  // barrier is outside the `if`, so no divergent sync
    }
}
// In-place tree reduction (max) over one block-row's shared-memory slice:
// on return, sdata[0] holds the maximum of sdata[0..blocksize-1].
// Same contract as reduce_sum: all block threads must reach this call and
// blocksize must be a power of two.
template<typename scalar_t>
__forceinline__ __device__ void reduce_max(scalar_t* sdata, int blocksize, int tid) {
    __syncthreads();
    for (int s{blocksize / 2}; s > 0; s >>= 1) {
        if (tid < s) {
            if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
        }
        __syncthreads();  // barrier reached by every thread each iteration
    }
}
}
// kernel functions
template<typename scalar_t>
__global__ void OHEMGetScores(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
scalar_t *scores,
const int64_t *labels,
int *indices,
const int64_t ignore_index) {
// shared memory
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
int sample_offset = gridDim.x * blockDim.y;
sdata = sdata + blockDim.x * threadIdx.y;
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
indices[i] = i;
int n_idx = i / m_size;
int m_idx = i % m_size;
int64_t lb = labels[i];
if (lb == ignore_index) {
if (tid == 0) scores[i] = scalar_t(1.);
continue;
}
// obtain max
sdata[tid] = scalar_t(-10000.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > sdata[tid]) sdata[tid] = val;
}
__syncthreads();
ohem_space::reduce_max<scalar_t>(sdata, blockDim.x, tid);
scalar_t max_val = sdata[0];
// obtain exp sum
sdata[tid] = 0.;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
sdata[tid] += expf(logits[idx] - max_val);
}
__syncthreads();
ohem_space::reduce_sum<scalar_t>(sdata, blockDim.x, tid);
if (tid == 0) {
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scores[i] = expf(logits[idx] - max_val) / sdata[0];
}
}
}
template<typename scalar_t>
__global__ void OHEMGetScoresSpatial(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
scalar_t *scores,
const int64_t *labels,
int *indices,
const int64_t ignore_index) {
int sample_offset = gridDim.x * blockDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int samplesize = n_size * m_size;
for (int i{tid}; i < samplesize; i += sample_offset) {
indices[i] = i;
int n_idx = i / m_size;
int m_idx = i % m_size;
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
scores[i] = scalar_t(1.);
continue;
}
// obtain max
scalar_t max_val = scalar_t(-10000.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > max_val) max_val = val;
}
// obtain sum exp
scalar_t sum_exp = scalar_t(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
sum_exp += expf(logits[idx] - max_val);
}
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scores[i] = expf(logits[idx] - max_val) / sum_exp;
}
}
// Given `scores` sorted ascending with `idx` carrying each score's original
// sample position, mark every sample past the first n_min whose score exceeds
// score_thresh as ignored. The n_min lowest-score (hardest) samples are
// always kept because the loop starts at n_min.
template<typename scalar_t>
__global__ void OHEMSetLabels(const int samplesize,
                              const int *idx,
                              const scalar_t *scores,
                              int64_t *ohem_label,
                              const int64_t ignore_index,
                              const float score_thresh,
                              const int64_t n_min) {
    int sample_offset = gridDim.x * blockDim.x;  // grid-stride step
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i{static_cast<int>(n_min) + tid}; i < samplesize; i += sample_offset) {
        // idx[i] maps the sorted position back to the original sample index
        if (scores[i] > score_thresh) ohem_label[idx[i]] = ignore_index;
    }
}
// cuda functions
at::Tensor Score_ohem_label_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float score_thresh,
const int64_t n_min) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
if (n_min >= samplesize) return labels;
// allocate memory and cuda grid/block
auto ohem_label = labels.clone();
auto scores = torch::empty_like(labels, logits.options());
thrust::device_vector<int> idx(samplesize);
if (ohem_label.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return ohem_label;
}
// call kernel
if (dimsize < 32 && samplesize > (4 * 1024)) {
int gridx = std::min((int)4096, int(samplesize / BLOCKSIZE));
gridx = std::max((int)1, gridx);
dim3 block1(BLOCKSIZE);
dim3 grid1(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
OHEMGetScoresSpatial<scalar_t><<<grid1, block1, 0, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
thrust::raw_pointer_cast(&idx[0]),
ignore_index
);
});
} else {
int blockx = 32;
while (blockx < dimsize) blockx *= 2;
blockx = std::max(std::min((int)BLOCKSIZE, blockx / 2), (int)32);
int blocky = std::min(samplesize, (int)(BLOCKSIZE / blockx));
blocky = std::max((int)1, blocky);
int gridx = std::min(4096, (int)(samplesize / blocky));
gridx = std::max((int)1, gridx);
int n_shm = blockx * blocky;
dim3 block1(blockx, blocky);
dim3 grid1(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
int shm_size = n_shm * sizeof(scalar_t);
OHEMGetScores<scalar_t><<<grid1, block1, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
thrust::raw_pointer_cast(&idx[0]),
ignore_index
);
});
}
int grid2_num = std::min(4096, (int)(samplesize / BLOCKSIZE));
grid2_num = std::max((int)1, grid2_num);
dim3 block2(BLOCKSIZE);
dim3 grid2(grid2_num);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
thrust::sort_by_key(
thrust::device,
scores.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>() + samplesize,
&idx[0]
);
OHEMSetLabels<scalar_t><<<grid2, block2, 0, at::cuda::getCurrentCUDAStream()>>>(
samplesize, thrust::raw_pointer_cast(&idx[0]),
scores.contiguous().data_ptr<scalar_t>(),
ohem_label.contiguous().data_ptr<int64_t>(),
ignore_index, score_thresh, n_min
);
});
AT_CUDA_CHECK(cudaGetLastError());
return ohem_label;
}
// python interface
// Python-facing entry point: refuses CPU tensors, pins the active device to
// that of `logits`, then runs the CUDA implementation.
at::Tensor Score_ohem_label(const at::Tensor &logits,
                            const at::Tensor &labels,
                            const int64_t ignore_index,
                            const float score_thresh,
                            const int64_t n_min) {
    if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
        AT_ERROR("this ohem method only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return Score_ohem_label_cuda(logits, labels, ignore_index, score_thresh, n_min);
}
// Python binding: exposes the OHEM routine to Python as `score_ohem_label`.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("score_ohem_label", &Score_ohem_label, "ohem by score on label");
}
|
455116ce80a643c7038ac6e94071e01809964669.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*
* Slightly modified to provide timing support
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#define TIMING_SUPPORT
#ifdef TIMING_SUPPORT
#include <helper_cuda.h>
#include <helper_functions.h>
#endif
#define CUDA_TIMING
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements, int nIter)
{
    // The same element-wise add is repeated nIter times so a single launch
    // carries enough work to time; every iteration writes the identical
    // value, so the result is unchanged.
    for (int j = 0; j < nIter; j++) {
        int i = blockDim.x * blockIdx.x + threadIdx.x;  // one element per thread
        if (i < numElements)  // guard the grid tail
        {
            C[i] = A[i] + B[i];
        }
    }
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialise the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int nIter = 100 ; // Number of iterations to run the kernel
int threadsPerBlock = 256;
// Note this pattern, based on integer division, for rounding up
int blocksPerGrid = 1 + ((numElements - 1) / threadsPerBlock);
printf("%d iterated launches of the CUDA kernel with %d blocks of %d threads\n",
nIter, blocksPerGrid, threadsPerBlock);
#ifdef TIMING_SUPPORT
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer); // create a timer
sdkStartTimer(&timer); // start the timer
#endif
#ifdef CUDA_TIMING
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
#endif
//for (int j = 0; j < nIter; j++)
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements, nIter);
#ifdef CUDA_TIMING
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
err = hipEventElapsedTime( &time, start, stop );
if (err != hipSuccess)
{
fprintf(stderr, "Failed to get elapsed time (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipEventDestroy( start );
hipEventDestroy( stop );
printf("CUDA_TIMING: %.4f ms\n", time);
#endif
// wait for device to finish
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef TIMING_SUPPORT
// stop and destroy timer
sdkStopTimer(&timer);
double dSeconds = sdkGetTimerValue(&timer)/(1000.0);
double dNumOps = 1.0e-9 * nIter * size;
double gflops = dNumOps/dSeconds;
//Log throughput, etc
printf("Throughput = %.4f GFlops\nTime = %.5f s\nSize = %.5f Gops\n\n",
gflops, dSeconds, dNumOps);
sdkDeleteTimer(&timer);
#endif
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to reset the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 455116ce80a643c7038ac6e94071e01809964669.cu | /**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*
* Slightly modified to provide timing support
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#define TIMING_SUPPORT
#ifdef TIMING_SUPPORT
#include <helper_cuda.h>
#include <helper_functions.h>
#endif
#define CUDA_TIMING
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements, int nIter)
{
    // The same element-wise add is repeated nIter times so a single launch
    // carries enough work to time; every iteration writes the identical
    // value, so the result is unchanged.
    for (int j = 0; j < nIter; j++) {
        int i = blockDim.x * blockIdx.x + threadIdx.x;  // one element per thread
        if (i < numElements)  // guard the grid tail
        {
            C[i] = A[i] + B[i];
        }
    }
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialise the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int nIter = 100 ; // Number of iterations to run the kernel
int threadsPerBlock = 256;
// Note this pattern, based on integer division, for rounding up
int blocksPerGrid = 1 + ((numElements - 1) / threadsPerBlock);
printf("%d iterated launches of the CUDA kernel with %d blocks of %d threads\n",
nIter, blocksPerGrid, threadsPerBlock);
#ifdef TIMING_SUPPORT
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer); // create a timer
sdkStartTimer(&timer); // start the timer
#endif
#ifdef CUDA_TIMING
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
#endif
//for (int j = 0; j < nIter; j++)
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements, nIter);
#ifdef CUDA_TIMING
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
err = cudaEventElapsedTime( &time, start, stop );
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to get elapsed time (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf("CUDA_TIMING: %.4f ms\n", time);
#endif
// wait for device to finish
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef TIMING_SUPPORT
// stop and destroy timer
sdkStopTimer(&timer);
double dSeconds = sdkGetTimerValue(&timer)/(1000.0);
double dNumOps = 1.0e-9 * nIter * size;
double gflops = dNumOps/dSeconds;
//Log throughput, etc
printf("Throughput = %.4f GFlops\nTime = %.5f s\nSize = %.5f Gops\n\n",
gflops, dSeconds, dNumOps);
sdkDeleteTimer(&timer);
#endif
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to reset the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
a2b4995cad43822a39ef99e876dfdd1070ac5d9b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright by Contributors
#include <xgboost/data.h>
#include "../../../src/data/simple_dmatrix.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include "../../../src/data/device_adapter.cuh"
#include "../helpers.h"
#include "test_array_interface.h"
#include "../../../src/data/array_interface.h"
using namespace xgboost; // NOLINT
TEST(SimpleDMatrix, FromColumnarDenseBasic) {
constexpr size_t kRows{16};
std::vector<Json> columns;
thrust::device_vector<double> d_data_0(kRows);
thrust::device_vector<uint32_t> d_data_1(kRows);
columns.emplace_back(GenerateDenseColumn<double>("<f8", kRows, &d_data_0));
columns.emplace_back(GenerateDenseColumn<uint32_t>("<u4", kRows, &d_data_1));
Json column_arr{columns};
std::string str;
Json::Dump(column_arr, &str);
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
EXPECT_EQ(dmat.Info().num_col_, 2);
EXPECT_EQ(dmat.Info().num_row_, 16);
EXPECT_EQ(dmat.Info().num_nonzero_, 32);
}
void TestDenseColumn(DMatrix* dmat, size_t n_rows, size_t n_cols) {
for (auto& batch : dmat->GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i * 2);
EXPECT_EQ(inst[j].index, j);
}
}
}
ASSERT_EQ(dmat->Info().num_row_, n_rows);
ASSERT_EQ(dmat->Info().num_col_, n_cols);
}
TEST(SimpleDMatrix, FromColumnarDense) {
constexpr size_t kRows{16};
constexpr size_t kCols{2};
std::vector<Json> columns;
thrust::device_vector<float> d_data_0(kRows);
thrust::device_vector<int32_t> d_data_1(kRows);
columns.emplace_back(GenerateDenseColumn<float>("<f4", kRows, &d_data_0));
columns.emplace_back(GenerateDenseColumn<int32_t>("<i4", kRows, &d_data_1));
Json column_arr{columns};
std::string str;
Json::Dump(column_arr, &str);
// no missing value
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
TestDenseColumn(&dmat, kRows, kCols);
}
// with missing value specified
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, 4.0, -1);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_col_, kCols);
ASSERT_EQ(dmat.Info().num_nonzero_, kCols * kRows - 2);
}
{
// no missing value, but has NaN
d_data_0[3] = std::numeric_limits<float>::quiet_NaN();
ASSERT_TRUE(std::isnan(d_data_0[3])); // removes 6.0
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
ASSERT_EQ(dmat.Info().num_nonzero_, kRows * kCols - 1);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_col_, kCols);
}
}
TEST(SimpleDMatrix, FromColumnarWithEmptyRows) {
constexpr size_t kRows = 102;
constexpr size_t kCols = 24;
std::vector<Json> v_columns(kCols);
std::vector<dh::device_vector<float>> columns_data(kCols);
std::vector<dh::device_vector<RBitField8::value_type>> column_bitfields(
kCols);
RBitField8::value_type constexpr kUCOne = 1;
for (size_t i = 0; i < kCols; ++i) {
auto& col = v_columns[i];
col = Object();
auto& data = columns_data[i];
data.resize(kRows);
thrust::sequence(data.begin(), data.end(), 0);
dh::safe_cuda(hipDeviceSynchronize());
dh::safe_cuda(hipGetLastError());
ASSERT_EQ(data.size(), kRows);
auto p_d_data = raw_pointer_cast(data.data());
std::vector<Json> j_data{
Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))),
Json(Boolean(false))};
col["data"] = j_data;
std::vector<Json> j_shape{Json(Integer(static_cast<Integer::Int>(kRows)))};
col["shape"] = Array(j_shape);
col["version"] = 3;
col["typestr"] = String("<f4");
// Construct the mask object.
col["mask"] = Object();
auto& j_mask = col["mask"];
j_mask["version"] = 3;
auto& mask_storage = column_bitfields[i];
mask_storage.resize(16); // 16 bytes
mask_storage[0] = ~(kUCOne << 2); // 3^th row is missing
mask_storage[1] = ~(kUCOne << 3); // 12^th row is missing
size_t last_ind = 12;
mask_storage[last_ind] = ~(kUCOne << 5);
std::set<size_t> missing_row_index{0, 1, last_ind};
for (size_t j = 0; j < mask_storage.size(); ++j) {
if (missing_row_index.find(j) == missing_row_index.cend()) {
// all other rows are valid
mask_storage[j] = ~0;
}
}
j_mask["data"] = std::vector<Json>{
Json(
Integer(reinterpret_cast<Integer::Int>(mask_storage.data().get()))),
Json(Boolean(false))};
j_mask["shape"] = Array(
std::vector<Json>{Json(Integer(static_cast<Integer::Int>(kRows)))});
j_mask["typestr"] = String("|i1");
}
Json column_arr{Array(v_columns)};
std::string str;
Json::Dump(column_arr, &str);
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i);
EXPECT_EQ(inst[j].index, j);
}
}
}
ASSERT_EQ(dmat.Info().num_nonzero_, (kRows - 3) * kCols);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_col_, kCols);
}
TEST(SimpleCSRSource, FromColumnarSparse) {
constexpr size_t kRows = 32;
constexpr size_t kCols = 2;
RBitField8::value_type constexpr kUCOne = 1;
std::vector<dh::device_vector<float>> columns_data(kCols);
std::vector<dh::device_vector<RBitField8::value_type>> column_bitfields(kCols);
{
// column 0
auto& mask = column_bitfields[0];
mask.resize(8);
for (size_t j = 0; j < mask.size(); ++j) {
mask[j] = ~0;
}
// the 2^th entry of first column is invalid
// [0 0 0 0 0 1 0 0]
mask[0] = ~(kUCOne << 2);
}
{
// column 1
auto& mask = column_bitfields[1];
mask.resize(8);
for (size_t j = 0; j < mask.size(); ++j) {
mask[j] = ~0;
}
// the 19^th entry of second column is invalid
// [~0~], [~0~], [0 0 0 0 1 0 0 0]
mask[2] = ~(kUCOne << 3);
}
for (size_t c = 0; c < kCols; ++c) {
columns_data[c].resize(kRows);
thrust::sequence(columns_data[c].begin(), columns_data[c].end(), 0);
}
std::vector<Json> j_columns(kCols);
for (size_t c = 0; c < kCols; ++c) {
auto& column = j_columns[c];
column = Object();
column["version"] = 3;
column["typestr"] = String("<f4");
auto p_d_data = raw_pointer_cast(columns_data[c].data());
std::vector<Json> j_data {
Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))),
Json(Boolean(false))};
column["data"] = j_data;
std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))};
column["shape"] = Array(j_shape);
column["version"] = 3;
column["typestr"] = String("<f4");
column["mask"] = Object();
auto& j_mask = column["mask"];
j_mask["version"] = 3;
j_mask["data"] = std::vector<Json>{
Json(Integer(reinterpret_cast<Integer::Int>(column_bitfields[c].data().get()))),
Json(Boolean(false))};
j_mask["shape"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(kRows)))});
j_mask["typestr"] = String("|i1");
}
Json column_arr {Array(j_columns)};
std::string str;
Json::Dump(column_arr, &str);
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(), -1);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_nonzero_, (kRows*kCols)-2);
}
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, 2.0, -1);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto e : inst) {
ASSERT_NE(e.fvalue, 2.0);
}
}
}
}
{
// no missing value, but has NaN
data::CudfAdapter adapter(str);
columns_data[0][4] = std::numeric_limits<float>::quiet_NaN(); // 0^th column 4^th row
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
ASSERT_TRUE(std::isnan(columns_data[0][4]));
// Two invalid entries and one NaN, in CSC
// 0^th column: 0, 1, 4, 5, 6, ..., kRows
// 1^th column: 0, 1, 2, 3, ..., 19, 21, ..., kRows
ASSERT_EQ(dmat.Info().num_nonzero_, kRows * kCols - 3);
}
}
TEST(SimpleDMatrix, FromColumnarSparseBasic) {
constexpr size_t kRows{16};
std::vector<Json> columns;
thrust::device_vector<double> d_data_0(kRows);
thrust::device_vector<uint32_t> d_data_1(kRows);
columns.emplace_back(GenerateSparseColumn<double>("<f8", kRows, &d_data_0));
columns.emplace_back(GenerateSparseColumn<uint32_t>("<u4", kRows, &d_data_1));
Json column_arr{columns};
std::string str;
Json::Dump(column_arr, &str);
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
EXPECT_EQ(dmat.Info().num_col_, 2);
EXPECT_EQ(dmat.Info().num_row_, 16);
EXPECT_EQ(dmat.Info().num_nonzero_, 32);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i * 2);
EXPECT_EQ(inst[j].index, j);
}
}
}
}
TEST(SimpleDMatrix, FromCupy){
int rows = 50;
int cols = 10;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, -1, 1);
EXPECT_EQ(dmat.Info().num_col_, cols);
EXPECT_EQ(dmat.Info().num_row_, rows);
EXPECT_EQ(dmat.Info().num_nonzero_, rows*cols);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i * cols + j);
EXPECT_EQ(inst[j].index, j);
}
}
}
}
TEST(SimpleDMatrix, FromCupySparse){
int rows = 2;
int cols = 2;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data[1] = std::numeric_limits<float>::quiet_NaN();
data[2] = std::numeric_limits<float>::quiet_NaN();
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, -1, 1);
EXPECT_EQ(dmat.Info().num_col_, cols);
EXPECT_EQ(dmat.Info().num_row_, rows);
EXPECT_EQ(dmat.Info().num_nonzero_, rows * cols - 2);
auto& batch = *dmat.GetBatches<SparsePage>().begin();
auto page = batch.GetView();
auto inst0 = page[0];
auto inst1 = page[1];
EXPECT_EQ(page[0].size(), 1);
EXPECT_EQ(page[1].size(), 1);
EXPECT_EQ(page[0][0].fvalue, 0.0f);
EXPECT_EQ(page[0][0].index, 0);
EXPECT_EQ(page[1][0].fvalue, 3.0f);
EXPECT_EQ(page[1][0].index, 1);
}
| a2b4995cad43822a39ef99e876dfdd1070ac5d9b.cu | // Copyright by Contributors
#include <xgboost/data.h>
#include "../../../src/data/simple_dmatrix.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include "../../../src/data/device_adapter.cuh"
#include "../helpers.h"
#include "test_array_interface.h"
#include "../../../src/data/array_interface.h"
using namespace xgboost; // NOLINT
TEST(SimpleDMatrix, FromColumnarDenseBasic) {
constexpr size_t kRows{16};
std::vector<Json> columns;
thrust::device_vector<double> d_data_0(kRows);
thrust::device_vector<uint32_t> d_data_1(kRows);
columns.emplace_back(GenerateDenseColumn<double>("<f8", kRows, &d_data_0));
columns.emplace_back(GenerateDenseColumn<uint32_t>("<u4", kRows, &d_data_1));
Json column_arr{columns};
std::string str;
Json::Dump(column_arr, &str);
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
EXPECT_EQ(dmat.Info().num_col_, 2);
EXPECT_EQ(dmat.Info().num_row_, 16);
EXPECT_EQ(dmat.Info().num_nonzero_, 32);
}
void TestDenseColumn(DMatrix* dmat, size_t n_rows, size_t n_cols) {
for (auto& batch : dmat->GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i * 2);
EXPECT_EQ(inst[j].index, j);
}
}
}
ASSERT_EQ(dmat->Info().num_row_, n_rows);
ASSERT_EQ(dmat->Info().num_col_, n_cols);
}
TEST(SimpleDMatrix, FromColumnarDense) {
constexpr size_t kRows{16};
constexpr size_t kCols{2};
std::vector<Json> columns;
thrust::device_vector<float> d_data_0(kRows);
thrust::device_vector<int32_t> d_data_1(kRows);
columns.emplace_back(GenerateDenseColumn<float>("<f4", kRows, &d_data_0));
columns.emplace_back(GenerateDenseColumn<int32_t>("<i4", kRows, &d_data_1));
Json column_arr{columns};
std::string str;
Json::Dump(column_arr, &str);
// no missing value
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
TestDenseColumn(&dmat, kRows, kCols);
}
// with missing value specified
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, 4.0, -1);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_col_, kCols);
ASSERT_EQ(dmat.Info().num_nonzero_, kCols * kRows - 2);
}
{
// no missing value, but has NaN
d_data_0[3] = std::numeric_limits<float>::quiet_NaN();
ASSERT_TRUE(std::isnan(d_data_0[3])); // removes 6.0
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
ASSERT_EQ(dmat.Info().num_nonzero_, kRows * kCols - 1);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_col_, kCols);
}
}
TEST(SimpleDMatrix, FromColumnarWithEmptyRows) {
constexpr size_t kRows = 102;
constexpr size_t kCols = 24;
std::vector<Json> v_columns(kCols);
std::vector<dh::device_vector<float>> columns_data(kCols);
std::vector<dh::device_vector<RBitField8::value_type>> column_bitfields(
kCols);
RBitField8::value_type constexpr kUCOne = 1;
for (size_t i = 0; i < kCols; ++i) {
auto& col = v_columns[i];
col = Object();
auto& data = columns_data[i];
data.resize(kRows);
thrust::sequence(data.begin(), data.end(), 0);
dh::safe_cuda(cudaDeviceSynchronize());
dh::safe_cuda(cudaGetLastError());
ASSERT_EQ(data.size(), kRows);
auto p_d_data = raw_pointer_cast(data.data());
std::vector<Json> j_data{
Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))),
Json(Boolean(false))};
col["data"] = j_data;
std::vector<Json> j_shape{Json(Integer(static_cast<Integer::Int>(kRows)))};
col["shape"] = Array(j_shape);
col["version"] = 3;
col["typestr"] = String("<f4");
// Construct the mask object.
col["mask"] = Object();
auto& j_mask = col["mask"];
j_mask["version"] = 3;
auto& mask_storage = column_bitfields[i];
mask_storage.resize(16); // 16 bytes
mask_storage[0] = ~(kUCOne << 2); // 3^th row is missing
mask_storage[1] = ~(kUCOne << 3); // 12^th row is missing
size_t last_ind = 12;
mask_storage[last_ind] = ~(kUCOne << 5);
std::set<size_t> missing_row_index{0, 1, last_ind};
for (size_t j = 0; j < mask_storage.size(); ++j) {
if (missing_row_index.find(j) == missing_row_index.cend()) {
// all other rows are valid
mask_storage[j] = ~0;
}
}
j_mask["data"] = std::vector<Json>{
Json(
Integer(reinterpret_cast<Integer::Int>(mask_storage.data().get()))),
Json(Boolean(false))};
j_mask["shape"] = Array(
std::vector<Json>{Json(Integer(static_cast<Integer::Int>(kRows)))});
j_mask["typestr"] = String("|i1");
}
Json column_arr{Array(v_columns)};
std::string str;
Json::Dump(column_arr, &str);
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i);
EXPECT_EQ(inst[j].index, j);
}
}
}
ASSERT_EQ(dmat.Info().num_nonzero_, (kRows - 3) * kCols);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_col_, kCols);
}
TEST(SimpleCSRSource, FromColumnarSparse) {
constexpr size_t kRows = 32;
constexpr size_t kCols = 2;
RBitField8::value_type constexpr kUCOne = 1;
std::vector<dh::device_vector<float>> columns_data(kCols);
std::vector<dh::device_vector<RBitField8::value_type>> column_bitfields(kCols);
{
// column 0
auto& mask = column_bitfields[0];
mask.resize(8);
for (size_t j = 0; j < mask.size(); ++j) {
mask[j] = ~0;
}
// the 2^th entry of first column is invalid
// [0 0 0 0 0 1 0 0]
mask[0] = ~(kUCOne << 2);
}
{
// column 1
auto& mask = column_bitfields[1];
mask.resize(8);
for (size_t j = 0; j < mask.size(); ++j) {
mask[j] = ~0;
}
// the 19^th entry of second column is invalid
// [~0~], [~0~], [0 0 0 0 1 0 0 0]
mask[2] = ~(kUCOne << 3);
}
for (size_t c = 0; c < kCols; ++c) {
columns_data[c].resize(kRows);
thrust::sequence(columns_data[c].begin(), columns_data[c].end(), 0);
}
std::vector<Json> j_columns(kCols);
for (size_t c = 0; c < kCols; ++c) {
auto& column = j_columns[c];
column = Object();
column["version"] = 3;
column["typestr"] = String("<f4");
auto p_d_data = raw_pointer_cast(columns_data[c].data());
std::vector<Json> j_data {
Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))),
Json(Boolean(false))};
column["data"] = j_data;
std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))};
column["shape"] = Array(j_shape);
column["version"] = 3;
column["typestr"] = String("<f4");
column["mask"] = Object();
auto& j_mask = column["mask"];
j_mask["version"] = 3;
j_mask["data"] = std::vector<Json>{
Json(Integer(reinterpret_cast<Integer::Int>(column_bitfields[c].data().get()))),
Json(Boolean(false))};
j_mask["shape"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(kRows)))});
j_mask["typestr"] = String("|i1");
}
Json column_arr {Array(j_columns)};
std::string str;
Json::Dump(column_arr, &str);
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(), -1);
ASSERT_EQ(dmat.Info().num_row_, kRows);
ASSERT_EQ(dmat.Info().num_nonzero_, (kRows*kCols)-2);
}
{
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, 2.0, -1);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto e : inst) {
ASSERT_NE(e.fvalue, 2.0);
}
}
}
}
{
// no missing value, but has NaN
data::CudfAdapter adapter(str);
columns_data[0][4] = std::numeric_limits<float>::quiet_NaN(); // 0^th column 4^th row
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
ASSERT_TRUE(std::isnan(columns_data[0][4]));
// Two invalid entries and one NaN, in CSC
// 0^th column: 0, 1, 4, 5, 6, ..., kRows
// 1^th column: 0, 1, 2, 3, ..., 19, 21, ..., kRows
ASSERT_EQ(dmat.Info().num_nonzero_, kRows * kCols - 3);
}
}
TEST(SimpleDMatrix, FromColumnarSparseBasic) {
constexpr size_t kRows{16};
std::vector<Json> columns;
thrust::device_vector<double> d_data_0(kRows);
thrust::device_vector<uint32_t> d_data_1(kRows);
columns.emplace_back(GenerateSparseColumn<double>("<f8", kRows, &d_data_0));
columns.emplace_back(GenerateSparseColumn<uint32_t>("<u4", kRows, &d_data_1));
Json column_arr{columns};
std::string str;
Json::Dump(column_arr, &str);
data::CudfAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
EXPECT_EQ(dmat.Info().num_col_, 2);
EXPECT_EQ(dmat.Info().num_row_, 16);
EXPECT_EQ(dmat.Info().num_nonzero_, 32);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i * 2);
EXPECT_EQ(inst[j].index, j);
}
}
}
}
TEST(SimpleDMatrix, FromCupy){
int rows = 50;
int cols = 10;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, -1, 1);
EXPECT_EQ(dmat.Info().num_col_, cols);
EXPECT_EQ(dmat.Info().num_row_, rows);
EXPECT_EQ(dmat.Info().num_nonzero_, rows*cols);
for (auto& batch : dmat.GetBatches<SparsePage>()) {
auto page = batch.GetView();
for (auto i = 0ull; i < batch.Size(); i++) {
auto inst = page[i];
for (auto j = 0ull; j < inst.size(); j++) {
EXPECT_EQ(inst[j].fvalue, i * cols + j);
EXPECT_EQ(inst[j].index, j);
}
}
}
}
TEST(SimpleDMatrix, FromCupySparse){
int rows = 2;
int cols = 2;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data[1] = std::numeric_limits<float>::quiet_NaN();
data[2] = std::numeric_limits<float>::quiet_NaN();
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
data::SimpleDMatrix dmat(&adapter, -1, 1);
EXPECT_EQ(dmat.Info().num_col_, cols);
EXPECT_EQ(dmat.Info().num_row_, rows);
EXPECT_EQ(dmat.Info().num_nonzero_, rows * cols - 2);
auto& batch = *dmat.GetBatches<SparsePage>().begin();
auto page = batch.GetView();
auto inst0 = page[0];
auto inst1 = page[1];
EXPECT_EQ(page[0].size(), 1);
EXPECT_EQ(page[1].size(), 1);
EXPECT_EQ(page[0][0].fvalue, 0.0f);
EXPECT_EQ(page[0][0].index, 0);
EXPECT_EQ(page[1][0].fvalue, 3.0f);
EXPECT_EQ(page[1][0].index, 1);
}
|
dca43456041890431b374b404b0e3c82653703ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
#ifdef THRUST_TEST_DEVICE_SIDE
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5, typename Iterator6>
__global__
void set_intersection_by_key_kernel(ExecutionPolicy exec,
Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 keys_result,
Iterator5 values_result,
Iterator6 result)
{
*result = thrust::set_intersection_by_key(exec, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, keys_result, values_result);
}
template<typename ExecutionPolicy>
void TestSetIntersectionByKeyDevice(ExecutionPolicy exec)
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), b_key(4);
Vector a_val(3);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
Vector ref_key(2), ref_val(2);
ref_key[0] = 0; ref_key[1] = 4;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
typedef thrust::pair<Iterator,Iterator> iter_pair;
thrust::device_vector<iter_pair> end_vec(1);
hipLaunchKernelGGL(( set_intersection_by_key_kernel), dim3(1),dim3(1), 0, 0, exec,
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
result_key.begin(),
result_val.begin(),
end_vec.begin());
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
thrust::pair<Iterator,Iterator> end = end_vec.front();
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
void TestSetIntersectionByKeyDeviceSeq()
{
TestSetIntersectionByKeyDevice(thrust::seq);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceSeq);
void TestSetIntersectionByKeyDeviceDevice()
{
TestSetIntersectionByKeyDevice(thrust::device);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceDevice);
void TestSetIntersectionByKeyDeviceNoSync()
{
TestSetIntersectionByKeyDevice(thrust::hip::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceNoSync);
#endif
template<typename ExecutionPolicy>
void TestSetIntersectionByKeyCudaStreams(ExecutionPolicy policy)
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a_key(3), b_key(4);
Vector a_val(3);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
Vector ref_key(2), ref_val(2);
ref_key[0] = 0; ref_key[1] = 4;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
hipStream_t s;
hipStreamCreate(&s);
auto streampolicy = policy.on(s);
thrust::pair<Iterator,Iterator> end =
thrust::set_intersection_by_key(streampolicy,
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
result_key.begin(),
result_val.begin());
hipStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
hipStreamDestroy(s);
}
void TestSetIntersectionByKeyCudaStreamsSync()
{
TestSetIntersectionByKeyCudaStreams(thrust::hip::par);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyCudaStreamsSync);
void TestSetIntersectionByKeyCudaStreamsNoSync()
{
TestSetIntersectionByKeyCudaStreams(thrust::hip::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyCudaStreamsNoSync);
| dca43456041890431b374b404b0e3c82653703ca.cu | #include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
#ifdef THRUST_TEST_DEVICE_SIDE
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5, typename Iterator6>
__global__
void set_intersection_by_key_kernel(ExecutionPolicy exec,
Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 keys_result,
Iterator5 values_result,
Iterator6 result)
{
*result = thrust::set_intersection_by_key(exec, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, keys_result, values_result);
}
template<typename ExecutionPolicy>
void TestSetIntersectionByKeyDevice(ExecutionPolicy exec)
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), b_key(4);
Vector a_val(3);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
Vector ref_key(2), ref_val(2);
ref_key[0] = 0; ref_key[1] = 4;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
typedef thrust::pair<Iterator,Iterator> iter_pair;
thrust::device_vector<iter_pair> end_vec(1);
set_intersection_by_key_kernel<<<1,1>>>(exec,
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
result_key.begin(),
result_val.begin(),
end_vec.begin());
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
thrust::pair<Iterator,Iterator> end = end_vec.front();
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
void TestSetIntersectionByKeyDeviceSeq()
{
TestSetIntersectionByKeyDevice(thrust::seq);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceSeq);
void TestSetIntersectionByKeyDeviceDevice()
{
TestSetIntersectionByKeyDevice(thrust::device);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceDevice);
void TestSetIntersectionByKeyDeviceNoSync()
{
TestSetIntersectionByKeyDevice(thrust::cuda::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyDeviceNoSync);
#endif
template<typename ExecutionPolicy>
void TestSetIntersectionByKeyCudaStreams(ExecutionPolicy policy)
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a_key(3), b_key(4);
Vector a_val(3);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;
b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
Vector ref_key(2), ref_val(2);
ref_key[0] = 0; ref_key[1] = 4;
ref_val[0] = 0; ref_val[1] = 0;
Vector result_key(2), result_val(2);
cudaStream_t s;
cudaStreamCreate(&s);
auto streampolicy = policy.on(s);
thrust::pair<Iterator,Iterator> end =
thrust::set_intersection_by_key(streampolicy,
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(),
result_key.begin(),
result_val.begin());
cudaStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), end.first);
ASSERT_EQUAL_QUIET(result_val.end(), end.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
cudaStreamDestroy(s);
}
void TestSetIntersectionByKeyCudaStreamsSync()
{
TestSetIntersectionByKeyCudaStreams(thrust::cuda::par);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyCudaStreamsSync);
void TestSetIntersectionByKeyCudaStreamsNoSync()
{
TestSetIntersectionByKeyCudaStreams(thrust::cuda::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionByKeyCudaStreamsNoSync);
|
f48c027389806cf9c6171d3025d6f46852e016ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .optix.cu - Copyright 2019/2020 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// function defintion helper
#define LH2_DEVFUNC static __forceinline__ __device__
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_functions.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapated from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
LH2_DEVFUNC float4 blueNoiseSampler4( int x, int y, int sampleIndex, int sampleDimension )
{
	// Optimized retrieval of 4 blue noise samples.
	// Fetches dimensions sampleDimension..sampleDimension+3 in one go via uint4
	// loads. NOTE(review): the uint4 casts assume the computed addresses are
	// 16-byte aligned, i.e. sampleDimension is a multiple of 4 (callers pass 0)
	// and x,y are pre-wrapped to 0..127 -- TODO confirm at call sites.
	const uint4 bn4 = *((uint4*)(params.blueNoise + sampleDimension + (x + y * 128) * 8 + 65536 * 3));
	const int rsi1 = (sampleIndex ^ bn4.x) & 255, rsi2 = (sampleIndex ^ bn4.y) & 255;
	const int rsi3 = (sampleIndex ^ bn4.z) & 255, rsi4 = (sampleIndex ^ bn4.w) & 255;
	const int v1 = params.blueNoise[sampleDimension + 0 + rsi1 * 256];
	const int v2 = params.blueNoise[sampleDimension + 1 + rsi2 * 256];
	const int v3 = params.blueNoise[sampleDimension + 2 + rsi3 * 256];
	const int v4 = params.blueNoise[sampleDimension + 3 + rsi4 * 256];
	const uint4 bx4 = *((uint4*)(params.blueNoise + (sampleDimension & 7) + (x + y * 128) * 8 + 65536));
	// scramble each sequence value, then map to (0,1)
	return make_float4( (0.5f + (v1 ^ bx4.x)) * (1.0f / 256.0f), (0.5f + (v2 ^ bx4.y)) * (1.0f / 256.0f),
		(0.5f + (v3 ^ bx4.z)) * (1.0f / 256.0f), (0.5f + (v4 ^ bx4.w)) * (1.0f / 256.0f) );
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
	// Sample a point on a 9-bladed aperture for depth of field. r0 selects the
	// blade and its fractional part is reused (rescaled) as the second
	// barycentric coordinate r2; (r1, r2) then sample the triangle spanned by
	// two adjacent blade edge directions.
	const float blade = (int)(r0 * 9);
	float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
	float x1, y1, x2, y2;
	__sincosf( blade * PI / 4.5f, &x1, &y1 );
	__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
	// fold (r1, r2) back into the unit triangle
	if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
	const float xr = x1 * r1 + x2 * r2;
	const float yr = y1 * r1 + y2 * r2;
	// posLensSize.xyz = lens position, .w scales the aperture extent
	float4 posLens = params.posLensSize;
	return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
	// Build one depth-of-field camera ray: origin O on the lens, direction D
	// toward a jittered point on the pixel. Blue noise drives the first 64
	// samples; plain xorshift randoms are used beyond that.
	// random point on pixel and lens
	int sx = pixelIdx % params.scrsize.x;
	int sy = pixelIdx / params.scrsize.x;
	int shift = params.shift; // presumably a per-frame tile offset to decorrelate the blue-noise pattern -- TODO confirm
	float4 r4;
	if (sampleIdx < 64)
	{
		r4 = blueNoiseSampler4( (sx + (shift & 127)) & 127, (sy + (shift >> 24)) & 127, sampleIdx, 0 );
	}
	else
	{
		r4.x = RandomFloat( seed ), r4.y = RandomFloat( seed );
		r4.z = RandomFloat( seed ), r4.w = RandomFloat( seed );
	}
	O = RandomPointOnLens( r4.x, r4.z );
	D = normalize( RayTarget( sx, sy, r4.y, r4.w, make_int2( params.scrsize ), params.distortion, params.p1, params.right, params.up ) - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
	// Spawn and trace one primary (camera) ray; record path state and hit data.
	const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
	const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
	uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
	// generate eye ray
	float3 O, D;
	generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
	// populate path state array; .w packs (pathIdx << 6) plus path flags
	params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) );
	params.pathStates[pathIdx + stride] = make_float4( D, 0 );
	// trace eye ray; payload u2 (prim_idx) stays 0xffffffff on a miss
	uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
	optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
	// only store hit records for actual hits, to reduce memory traffic
	if (pixelIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
		params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
	// Trace one extension (bounce) ray whose origin/direction were already
	// written to pathStates by the shading stage; store the hit on success.
	const float4 O4 = params.pathStates[rayIdx];
	const float4 D4 = params.pathStates[rayIdx + stride];
	// payload u2 (prim_idx) stays 0xffffffff on a miss
	uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
	optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
	if (rayIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
		params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupPhotonRay( const uint rayIdx )
{
	// Trace one photon ray. Photons occupy 3 consecutive float4 slots in
	// pathStates: [0] = origin, [1] = direction, [2] = auxiliary data.
	const float4 O4 = params.pathStates[rayIdx * 3 + 0];
	const float4 D4 = params.pathStates[rayIdx * 3 + 1];
	uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
	optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
	// write hit info back into the photon's state record
	params.pathStates[rayIdx * 3 + 0].w = __uint_as_float( u3 /* intersection distance */ );
	params.pathStates[rayIdx * 3 + 2].y = __uint_as_float( u1 /* inst_idx */ );
	params.pathStates[rayIdx * 3 + 2].z = __uint_as_float( u2 /* prim_idx */ );
}
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
	// Trace one shadow (connection) ray; if nothing blocks it, add the stored
	// radiance E4.xyz to the accumulator pixel whose index is packed in E4.w.
	const float4 O4 = params.connectData[rayIdx]; // O4
	const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
	// launch shadow ray; u0 starts at 1 and is cleared by __miss__occlusion
	uint u0 = 1;
	optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
	if (u0) return; // u0 still set: ray was occluded, no contribution
	const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
	const int pixelIdx = __float_as_int( E4.w );
	if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
	// Single OptiX ray-generation entry point; params.phase selects which task
	// this launch performs (primary / shadow / secondary / photon rays).
	const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
	const uint3 idx = optixGetLaunchIndex();
	const uint rayIdx = idx.x + idx.y * params.scrsize.x;
	switch (params.phase)
	{
	case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break;
	case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break;
	case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break;
	case Params::SPAWN_PHOTONS: /* photons */ setupPhotonRay( idx.x ); break;
	}
}
extern "C" __global__ void __miss__occlusion()
{
	// Shadow-ray miss program: clearing payload 0 signals "not occluded"
	// (the payload is initialized to 1 before optixTrace).
	optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}
extern "C" __global__ void __closesthit__radiance()
{
	// Report the hit through the four payload registers:
	// p0 = barycentrics packed as two 16-bit fixed-point values (u low, v high),
	// p1 = instance index, p2 = primitive index, p3 = hit distance (float bits).
	const uint prim_idx = optixGetPrimitiveIndex();
	const uint inst_idx = optixGetInstanceIndex();
	const float2 bary = optixGetTriangleBarycentrics();
	const float tmin = optixGetRayTmax();
	optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
	optixSetPayload_1( inst_idx );
	optixSetPayload_2( prim_idx );
	optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF | f48c027389806cf9c6171d3025d6f46852e016ac.cu | /* .optix.cu - Copyright 2019/2020 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// function defintion helper
#define LH2_DEVFUNC static __forceinline__ __device__
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_functions.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapated from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
LH2_DEVFUNC float4 blueNoiseSampler4( int x, int y, int sampleIndex, int sampleDimension )
{
// Optimized retrieval of 4 blue noise samples.
const uint4 bn4 = *((uint4*)(params.blueNoise + sampleDimension + (x + y * 128) * 8 + 65536 * 3));
const int rsi1 = (sampleIndex ^ bn4.x) & 255, rsi2 = (sampleIndex ^ bn4.y) & 255;
const int rsi3 = (sampleIndex ^ bn4.z) & 255, rsi4 = (sampleIndex ^ bn4.w) & 255;
const int v1 = params.blueNoise[sampleDimension + 0 + rsi1 * 256];
const int v2 = params.blueNoise[sampleDimension + 1 + rsi2 * 256];
const int v3 = params.blueNoise[sampleDimension + 2 + rsi3 * 256];
const int v4 = params.blueNoise[sampleDimension + 3 + rsi4 * 256];
const uint4 bx4 = *((uint4*)(params.blueNoise + (sampleDimension & 7) + (x + y * 128) * 8 + 65536));
return make_float4( (0.5f + (v1 ^ bx4.x)) * (1.0f / 256.0f), (0.5f + (v2 ^ bx4.y)) * (1.0f / 256.0f),
(0.5f + (v3 ^ bx4.z)) * (1.0f / 256.0f), (0.5f + (v4 ^ bx4.w)) * (1.0f / 256.0f) );
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
float4 posLens = params.posLensSize;
return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
// random point on pixel and lens
int sx = pixelIdx % params.scrsize.x;
int sy = pixelIdx / params.scrsize.x;
int shift = params.shift;
float4 r4;
if (sampleIdx < 64)
{
r4 = blueNoiseSampler4( (sx + (shift & 127)) & 127, (sy + (shift >> 24)) & 127, sampleIdx, 0 );
}
else
{
r4.x = RandomFloat( seed ), r4.y = RandomFloat( seed );
r4.z = RandomFloat( seed ), r4.w = RandomFloat( seed );
}
O = RandomPointOnLens( r4.x, r4.z );
float3 posOnPixel = RayTarget( sx, sy, r4.y, r4.w, make_int2( params.scrsize ), params.distortion, params.p1, params.right, params.up );
D = normalize( posOnPixel - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
// generate eye ray
float3 O, D;
generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
// populate path state array
params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) );
params.pathStates[pathIdx + stride] = make_float4( D, 0 );
// trace eye ray
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
if (pixelIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.pathStates[rayIdx];
const float4 D4 = params.pathStates[rayIdx + stride];
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
if (rayIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupPhotonRay( const uint rayIdx )
{
const float4 O4 = params.pathStates[rayIdx * 3 + 0];
const float4 D4 = params.pathStates[rayIdx * 3 + 1];
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.pathStates[rayIdx * 3 + 0].w = __uint_as_float( u3 /* intersection distance */ );
params.pathStates[rayIdx * 3 + 2].y = __uint_as_float( u1 /* inst_idx */ );
params.pathStates[rayIdx * 3 + 2].z = __uint_as_float( u2 /* prim_idx */ );
}
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.connectData[rayIdx]; // O4
const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
// launch shadow ray
uint u0 = 1;
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
if (u0) return;
const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
const int pixelIdx = __float_as_int( E4.w );
if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
const uint3 idx = optixGetLaunchIndex();
const uint rayIdx = idx.x + idx.y * params.scrsize.x;
switch (params.phase)
{
case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break;
case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break;
case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break;
case Params::SPAWN_PHOTONS: /* photons */ setupPhotonRay( idx.x ); break;
}
}
extern "C" __global__ void __miss__occlusion()
{
optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}
extern "C" __global__ void __closesthit__radiance()
{
const uint prim_idx = optixGetPrimitiveIndex();
const uint inst_idx = optixGetInstanceIndex();
const float2 bary = optixGetTriangleBarycentrics();
const float tmin = optixGetRayTmax();
optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
optixSetPayload_1( inst_idx );
optixSetPayload_2( prim_idx );
optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF |
bc671647953be6b44f15b34af8d43c9bf5bf08ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @Author: jose
* @Date: 2020-08-24 00:00:00
* @Last Modified by: jose
* @Last Modified time: 2020-08-24 00:00:00
*/
// local libs
#include "kernels_hip.cuh"
#include <hipfft.h>
// ===========================================
// Check Errors
// ===========================================
#define imart_assert_kernel(status, msg) \
imart_assert_kernel_error((status), __FILE__, __LINE__, msg);
// Host-side error check: prints a detailed report for a failed HIP call and
// (by default) terminates the process with the error code. Invoked through
// the imart_assert_kernel macro, which supplies __FILE__/__LINE__.
void imart_assert_kernel_error(hipError_t code, const char *file, int line, const char* msg, bool abort=true)
{
    if (code != hipSuccess)
    {
        fprintf(stderr,"\n******* CUDA Error *******"\
        "\n[Error] Information:\t%s"\
        "\n[Error] Error code:\t%i"\
        "\n[Error] Description:\t%s"\
        "\n[Error] File:\t\t%s"\
        "\n[Error] Line:\t\t%d\n",
        msg, code, hipGetErrorString(code), file, line);
        if (abort) exit(code);
    };
};
// ===========================================
// Kernels
// ===========================================
// ===========================================
// Data Kernels
// ===========================================
template <typename type>
__global__ void kernel_assign(type * vin, type value, int n)
{
    // Fill: vin[idx] = value for every idx in [0, n); one thread per element.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vin[idx] = value;
};
template <typename type>
__global__ void kernel_copy(const type * vin, type * vout, int n)
{
    // Elementwise copy of n values from vin to vout.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin[idx];
};
template <typename typein, typename typeout>
__global__ void kernel_cast(const typein * vin, typeout * vout, int n)
{
    // Elementwise type conversion from typein to typeout.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = typeout(vin[idx]);
};
// ===========================================
// Vector Kernels
// ===========================================
template <typename type>
__global__ void kernel_add_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise: vout = vin + scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin[idx] + scalar;
};
template <typename type>
__global__ void kernel_sub_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise: vout = vin - scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin[idx] - scalar;
};
template <typename type>
__global__ void kernel_sub_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise, reversed operands: vout = scalar - vin.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = scalar - vin[idx];
};
template <typename type>
__global__ void kernel_mul_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise: vout = vin * scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin[idx] * scalar;
};
template <typename type>
__global__ void kernel_div_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise: vout = vin / scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin[idx] / scalar;
};
template <typename type>
__global__ void kernel_div_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise, reversed operands: vout = scalar / vin.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = scalar / vin[idx];
};
template <typename type>
__global__ void kernel_pow_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise power: vout = vin ^ scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = pow( vin[idx], scalar );
};
template <typename type>
__global__ void kernel_pow_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise power, reversed operands: vout = scalar ^ vin.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = pow( scalar, vin[idx] );
};
template <typename type>
__global__ void kernel_add(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise: vout = vin1 + vin2.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin1[idx] + vin2[idx];
};
template <typename type>
__global__ void kernel_sub(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise: vout = vin1 - vin2.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin1[idx] - vin2[idx];
};
template <typename type>
__global__ void kernel_mul(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise (Hadamard) product: vout = vin1 * vin2.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin1[idx] * vin2[idx];
};
template <typename type>
__global__ void kernel_div(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise division: vout = vin1 / vin2.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin1[idx] / vin2[idx];
};
template <typename type>
__global__ void kernel_pow(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise power: vout = vin1 ^ vin2.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = pow( vin1[idx], vin2[idx] );
};
template <typename type>
__global__ void kernel_equal(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise comparison; stores 1/0 (as type): vout = (vin1 == vin2).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin1[idx] == vin2[idx]);
};
template <typename type>
__global__ void kernel_greater(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise comparison; stores 1/0 (as type): vout = (vin1 > vin2).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin1[idx] > vin2[idx]);
};
template <typename type>
__global__ void kernel_less(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise comparison; stores 1/0 (as type): vout = (vin1 < vin2).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin1[idx] < vin2[idx]);
};
template <typename type>
__global__ void kernel_greater_equal(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise comparison; stores 1/0 (as type): vout = (vin1 >= vin2).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin1[idx] >= vin2[idx];
};
template <typename type>
__global__ void kernel_less_equal(const type * vin1, const type * vin2, type * vout, int n)
{
    // Elementwise comparison; stores 1/0 (as type): vout = (vin1 <= vin2).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = vin1[idx] <= vin2[idx];
};
template <typename type>
__global__ void kernel_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise comparison against a scalar; stores 1/0 (as type).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin[idx] == scalar);
};
template <typename type>
__global__ void kernel_greater_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise comparison against a scalar; stores 1/0 (as type): vin > scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin[idx] > scalar);
};
template <typename type>
__global__ void kernel_less_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise comparison against a scalar; stores 1/0 (as type): vin < scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin[idx] < scalar);
};
template <typename type>
__global__ void kernel_greater_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise comparison against a scalar; stores 1/0 (as type): vin >= scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin[idx] >= scalar);
};
template <typename type>
__global__ void kernel_less_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
    // Elementwise comparison against a scalar; stores 1/0 (as type): vin <= scalar.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    vout[idx] = (vin[idx] <= scalar);
};
template <typename type>
__global__ void kernel_replace(const type * idxs, const type * vin, type * vout, int n)
{
    // Masked copy: where the mask idxs[i] is non-zero, overwrite vout[i] with
    // vin[i]; other entries of vout are left untouched.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n && idxs[i]) vout[i] = vin[i];
};
template <typename type>
__global__ void kernel_replace_scalar(const type * idxs, type * vout, type value, int n)
{
    // Masked fill: where the mask idxs[i] is non-zero, set vout[i] = value;
    // other entries of vout are left untouched.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n && idxs[i]) vout[i] = value;
};
// ===========================================
// Reduction Kernels
// ===========================================
// Block-wise sum reduction: each block writes one partial sum to
// vout[blockIdx.x]; the caller is expected to reduce the per-block partials.
// Assumes blockDim.x is a power of two and <= 256 (size of sdata).
template <typename type>
__global__ void kernel_sum(const type *vin, type *vout, int n)
{
    __shared__ type sdata[256]; // Warning, threads should be 256
    unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    type sum = 0;
    // grid-stride accumulation into a per-thread partial
    for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
    {
        sum += vin[i];
    };
    sdata[tid] = sum;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        };
        __syncthreads();
    };
    if (tid == 0) vout[blockIdx.x] = sdata[0];
};
// Block-wise minimum reduction: each block writes its minimum to
// vout[blockIdx.x]. Seeds every thread with vin[0] (assumes n >= 1);
// assumes blockDim.x is a power of two and <= 256.
template <typename type>
__global__ void kernel_min(const type *vin, type *vout, int n)
{
    __shared__ type sdata[256];
    unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    type thread_result = vin[0];
    // grid-stride pass: per-thread running minimum
    for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
    {
        type tmp = vin[i];
        thread_result = thread_result < tmp ? thread_result : tmp;
    };
    sdata[tid] = thread_result;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] = sdata[tid] < sdata[tid + s]? sdata[tid] : sdata[tid + s];
        };
        __syncthreads();
    };
    if (tid == 0) vout[blockIdx.x] = sdata[0];
};
// Block-wise maximum reduction: each block writes its maximum to
// vout[blockIdx.x]. Seeds every thread with vin[0] (assumes n >= 1);
// assumes blockDim.x is a power of two and <= 256.
template <typename type>
__global__ void kernel_max(const type *vin, type *vout, int n)
{
    __shared__ type sdata[256];
    unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    type thread_result = vin[0];
    // grid-stride pass: per-thread running maximum
    for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
    {
        type tmp = vin[i];
        thread_result = thread_result > tmp ? thread_result : tmp;
    };
    sdata[tid] = thread_result;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] = sdata[tid] > sdata[tid + s]? sdata[tid] : sdata[tid + s];
        };
        __syncthreads();
    };
    if (tid == 0) vout[blockIdx.x] = sdata[0];
};
// ===========================================
// Image Kernels
// ===========================================
template <typename type>
__global__ void kernel_pad_2d(const type * vin, type * vout, int start0, int start1,
                              int end0, int end1, int n0, int n1)
{
    // Copy an n0 x n1 image into a padded output (width n0+start0+end0),
    // offset by (start0, start1). The padding region is left untouched.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= n0 || j >= n1) return; // width = n0, heigth = n1
    const int wo = n0 + start0 + end0; // padded output width
    vout[(start0 + i) + (start1 + j)*wo] = vin[i + j*n0];
};
template <typename type>
__global__ void kernel_unpad_2d(const type * vin, type * vout, int start0, int start1,
                                int end0, int end1, int n0, int n1)
{
    // Inverse of kernel_pad_2d: extract the n0 x n1 interior that starts at
    // (start0, start1) from a padded input of width n0+start0+end0.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= n0 || j >= n1) return; // width = n0, heigth = n1
    const int wo = n0 + start0 + end0; // padded input width
    vout[i + j*n0] = vin[(start0 + i) + (start1 + j)*wo];
};
template <typename type>
__global__ void kernel_pad_3d(const type * vin, type * vout, int start0, int start1, int start2,
                              int end0, int end1, int end2, int n0, int n1, int n2)
{
    // Copy an n0 x n1 x n2 volume into a padded output, offset by
    // (start0, start1, start2). The padding region is left untouched.
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || k >= n2) return; // width = n0, height = n1, depth = n2
    const int wo = n0 + start0 + end0; // padded output width
    const int ho = n1 + start1 + end1; // padded output height
    vout[(start0 + i) + (start1 + j)*wo + (start2 + k)*wo*ho] = vin[i + j*n0 + k*n0*n1];
};
template <typename type>
__global__ void kernel_unpad_3d(const type * vin, type * vout, int start0, int start1, int start2,
                                int end0, int end1, int end2, int n0, int n1, int n2)
{
    // Inverse of kernel_pad_3d: extract the n0 x n1 x n2 interior that starts
    // at (start0, start1, start2) from the padded input volume.
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || k >= n2) return; // width = n0, height = n1, depth = n2
    const int wo = n0 + start0 + end0; // padded input width
    const int ho = n1 + start1 + end1; // padded input height
    vout[i + j*n0 + k*n0*n1] = vin[(start0 + i) + (start1 + j)*wo + (start2 + k)*wo*ho];
};
template <typename type>
__global__ void kernel_grid_2d( type * x, type * y, double * sod,
                                int n0, int n1)
{
    // Fill x/y with transformed grid coordinates for an n0 x n1 image.
    // sod packs 8 doubles; names suggest scale (c), origin (o) and a 2x2
    // direction matrix (d): [c0 c1 o0 o1 d0 d1 d2 d3].
    // consider sod conversion to float to support all gpu
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= n0 || j >= n1) return; // width = n0, heigth = n1
    const double c0 = sod[0], c1 = sod[1];
    const double o0 = sod[2], o1 = sod[3];
    const double d0 = sod[4], d1 = sod[5];
    const double d2 = sod[6], d3 = sod[7];
    const int idx = i + j*n0;
    x[idx] = (type)(d0*c0*i + d1*c1*j + o0);
    y[idx] = (type)(d2*c0*i + d3*c1*j + o1);
};
template <typename type>
__global__ void kernel_grid_3d( type * x, type * y, type * z, double * sod,
                                int n0, int n1, int n2)
{
    // Fill x/y/z with transformed grid coordinates for an n0 x n1 x n2 volume.
    // sod packs 15 doubles; names suggest scale (c), origin (o) and a 3x3
    // direction matrix (d): [c0..c2 o0..o2 d0..d8].
    // consider sod conversion to float to support all gpu
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || k >= n2) return; // width = n0, height = n1, depth = n2
    const double c0 = sod[0], c1 = sod[1], c2 = sod[2];
    const double o0 = sod[3], o1 = sod[4], o2 = sod[5];
    const double d0 = sod[6], d1 = sod[7], d2 = sod[8];
    const double d3 = sod[9], d4 = sod[10], d5 = sod[11];
    const double d6 = sod[12], d7 = sod[13], d8 = sod[14];
    const int idx = i + j*n0 + k*n0*n1;
    x[idx] = (type)(d0*c0*i + d1*c1*j + d2*c2*k + o0);
    y[idx] = (type)(d3*c0*i + d4*c1*j + d5*c2*k + o1);
    z[idx] = (type)(d6*c0*i + d7*c1*j + d8*c2*k + o2);
};
template <typename type>
__global__ void kernel_affine_2d( const type * xin, const type * yin,
                                  type * xout, type * yout,
                                  const type * param, int n)
{
    // Apply a 2D affine transform to n coordinate pairs: out = A*in + t,
    // with param = [a0 a1 a2 a3 t0 t1] (row-major 2x2 matrix A).
    const int i = blockDim.x * blockIdx.x + threadIdx.x; // buffers share one length n
    if (i >= n) return;
    const type a0 = param[0], a1 = param[1];
    const type a2 = param[2], a3 = param[3];
    const type t0 = param[4], t1 = param[5];
    xout[i] = (type)(a0*xin[i] + a1*yin[i] + t0);
    yout[i] = (type)(a2*xin[i] + a3*yin[i] + t1);
};
template <typename type>
__global__ void kernel_affine_3d( const type * xin, const type * yin, const type * zin,
                                  type * xout, type * yout, type * zout,
                                  const type * param, int n)
{
    // Apply a 3D affine transform to n coordinate triples: out = A*in + t,
    // with param = [a0..a8 t0 t1 t2] (row-major 3x3 matrix A).
    const int i = blockDim.x * blockIdx.x + threadIdx.x; // buffers share one length n
    if (i >= n) return;
    const type a0 = param[0], a1 = param[1], a2 = param[2];
    const type a3 = param[3], a4 = param[4], a5 = param[5];
    const type a6 = param[6], a7 = param[7], a8 = param[8];
    const type t0 = param[9], t1 = param[10], t2 = param[11];
    xout[i] = (type)(a0*xin[i] + a1*yin[i] + a2*zin[i] + t0);
    yout[i] = (type)(a3*xin[i] + a4*yin[i] + a5*zin[i] + t1);
    zout[i] = (type)(a6*xin[i] + a7*yin[i] + a8*zin[i] + t2);
};
template <typename type>
__global__ void kernel_affine_sod_2d( const type * xin, const type * yin,
                                      type * xout, type * yout,
                                      const double * sod, int n)
{
    // Transform n coordinate pairs by the packed sod parameters
    // [c0 c1 o0 o1 d0 d1 d2 d3] (same layout as kernel_grid_2d).
    // consider sod conversion to float to support all gpu
    const int i = blockDim.x * blockIdx.x + threadIdx.x; // buffers share one length n
    if (i >= n) return;
    const double c0 = sod[0], c1 = sod[1];
    const double o0 = sod[2], o1 = sod[3];
    const double d0 = sod[4], d1 = sod[5];
    const double d2 = sod[6], d3 = sod[7];
    xout[i] = (type)(d0*c0*xin[i] + d1*c1*yin[i] + o0);
    yout[i] = (type)(d2*c0*xin[i] + d3*c1*yin[i] + o1);
};
template <typename type>
__global__ void kernel_affine_sod_3d( const type * xin, const type * yin, const type * zin,
                                      type * xout, type * yout, type * zout,
                                      const double * sod, int n)
{
    // Transform n coordinate triples by the packed sod parameters
    // [c0..c2 o0..o2 d0..d8] (same layout as kernel_grid_3d).
    // consider sod conversion to float to support all gpu
    const int i = blockDim.x * blockIdx.x + threadIdx.x; // buffers share one length n
    if (i >= n) return;
    const double c0 = sod[0], c1 = sod[1], c2 = sod[2];
    const double o0 = sod[3], o1 = sod[4], o2 = sod[5];
    const double d0 = sod[6], d1 = sod[7], d2 = sod[8];
    const double d3 = sod[9], d4 = sod[10], d5 = sod[11];
    const double d6 = sod[12], d7 = sod[13], d8 = sod[14];
    xout[i] = (type)(d0*c0*xin[i] + d1*c1*yin[i] + d2*c2*zin[i] + o0);
    yout[i] = (type)(d3*c0*xin[i] + d4*c1*yin[i] + d5*c2*zin[i] + o1);
    zout[i] = (type)(d6*c0*xin[i] + d7*c1*yin[i] + d8*c2*zin[i] + o2);
};
template <typename type>
__global__ void kernel_dfield_2d( const type * xin, const type * yin, // grid coordinates
                                  const type * x, const type * y,     // vector field
                                  type * xout, type * yout, int n)
{
    // Displace 2D grid coordinates by a dense vector field: out = in + field.
    const int i = blockDim.x * blockIdx.x + threadIdx.x; // buffers share one length n
    if (i >= n) return;
    xout[i] = xin[i] + x[i];
    yout[i] = yin[i] + y[i];
};
template <typename type>
__global__ void kernel_dfield_3d( const type * xin, const type * yin, const type * zin, // grid coordinates
                                  const type * x, const type * y, const type * z,       // vector field
                                  type * xout, type * yout, type * zout,                // output coordinates
                                  int n)
{
    // Displace 3D grid coordinates by a dense vector field: out = in + field.
    const int i = blockDim.x * blockIdx.x + threadIdx.x; // buffers share one length n
    if (i >= n) return;
    xout[i] = xin[i] + x[i];
    yout[i] = yin[i] + y[i];
    zout[i] = zin[i] + z[i];
};
template <typename type>
__global__ void kernel_nearest_interpolation_2d( const type * xo, const type * yo,
                                                 const type * imgr, type * imgo,
                                                 int w, int h,   //img ref width and height
                                                 int n0, int n1) //img out dims
{
    // Nearest-neighbour resampling: for each output pixel (i,j) read the
    // reference image at the rounded sample coordinates (xo,yo). Pixels whose
    // sample falls outside the reference image are left untouched.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= n0 || j >= n1) return;
    const int idx = i + j*n0;
    const int x = round(xo[idx]);
    const int y = round(yo[idx]);
    if (x >= 0 && x < w && y >= 0 && y < h) imgo[idx] = imgr[x + y*w];
};
template <typename type>
__global__ void kernel_nearest_interpolation_3d( const type * xo, const type * yo, const type * zo,
                                                 const type * imgr, type * imgo,
                                                 int w, int h, int l, //img ref width, height and length
                                                 int n0, int n1, int n2)
{
    // Nearest-neighbour resampling: for each output voxel (i,j,k) read the
    // reference volume at the rounded sample coordinates (xo,yo,zo). Voxels
    // whose sample falls outside the reference volume are left untouched.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && k < n2)
    {
        int idx = i + j*n0 + k*n0*n1;
        int x = round(xo[idx]);
        int y = round(yo[idx]);
        int z = round(zo[idx]); // bug fix: previously rounded yo, producing a wrong depth coordinate
        if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l)
        {
            imgo[idx] = imgr[x + y*w + z*w*h];
        };
    };
};
template <typename type>
__global__ void kernel_linear_interpolation_2d( const type * xo, const type * yo,
                                                const type * imgr, type * imgo,
                                                int w, int h, //img ref width and height
                                                int n0, int n1) //img out dims
{
    // Bilinear sampling of imgr at the (possibly fractional) coordinates
    // (xo, yo). Fractional parts below 'zero' are snapped to the pixel so
    // (near-)integer coordinates skip the full bilinear blend. Coordinates
    // outside the reference image leave imgo untouched.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < n0 && j < n1)
    {
        type zero = 0.01; // snapping tolerance for fractional parts
        int idx = i + j*n0;
        type xt = xo[idx];
        type yt = yo[idx];
        int x = floor(xt);
        int y = floor(yt);
        if(x >= 0 && x < w && y >= 0 && y < h)
        {
            // BUG FIX: the "same y" branch read imgr[x+1+...] even when
            // x == w-1, an out-of-bounds access; clamp the +1 neighbour so the
            // border column degenerates to edge extension.
            int x1 = (x < w - 1) ? x + 1 : x;
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            if (dx < zero && dy < zero)
            {
                // exactly on a pixel: plain copy
                imgo[idx] = imgr[x+y*w];
            }
            else if (dy < zero || y >= h - 1) // same y: 1-d blend along x
            {
                imgo[idx] = imgr[x+y*w]*(1-dx) + imgr[x1+y*w]*(dx);
            }
            else if (dx < zero || x >= w - 1) // same x: 1-d blend along y
            {
                imgo[idx] = imgr[x+y*w]*(1-dy) + imgr[x+(y+1)*w]*(dy);
            }
            else
            {
                // interior: full bilinear blend of the 2x2 neighbourhood
                // (reached only when x < w-1 and y < h-1, so +1 reads are safe)
                type dxdy = dx*dy;
                type r = imgr[x+y*w]*(1-dx-dy+dxdy) + imgr[x+1+y*w]*(dx-dxdy) + imgr[x+(y+1)*w]*(dy-dxdy) + imgr[x+1+(y+1)*w]*dxdy;
                imgo[idx] = r;
            };
        };
    };
};
// template <typename type>
// __global__ void kernel_linear_interpolation_2d( const type * xo, const type * yo,
// const type * imgr, type * imgo,
// int w, int h, //img ref width and height
// int n0, int n1) //img out dims
// {
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// int j = blockDim.y * blockIdx.y + threadIdx.y;
// if (i < n0 && j < n1)
// {
// type xt = xo[i + j*n0];
// type yt = yo[i + j*n0];
// int x = floor(xt);
// int y = floor(yt);
// if(x >= 0 && x < w && y >= 0 && y < h - 1)
// {
// // __shared__ iv[4];
// type iv[4] = {imgr[x+y*w], imgr[x+1+y*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w]};
// type dx = xt - (type)x;
// type dy = yt - (type)y;
// type dxdy = dx*dy;
// type r = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
// imgo[i + j*n0] = r;
// }
// else if(x >= 0 && x < w && y == h - 1) // border case
// {
// type iv[2] = {imgr[x+y*w], imgr[x+1+y*w]};
// type dx = xt - (type)x;
// type r = iv[0]*(1-dx) + iv[1]*(dx);
// imgo[i + j*n0] = r;
// };
// };
// };
// Trilinear sampling of imgr at fractional coordinates (xo, yo, zo).
// Fractional parts <= 'zero' are treated as lying exactly on a voxel, so the
// branch ladder degenerates the blend to 2-d, 1-d or a plain copy depending
// on which of dx/dy/dz are active. Coordinates outside the reference image
// leave imgo untouched.
template <typename type>
__global__ void kernel_linear_interpolation_3d( const type * xo, const type * yo, const type * zo,
const type * imgr, type * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int k = (blockIdx.z * blockDim.z) + threadIdx.z;
if (i < n0 && j < n1 && k < n2)
{
type zero = 0.01; // tolerance below which a fractional part is snapped to the voxel
int idx = i + j*n0 + k*n0*n1;
type xt = xo[idx];
type yt = yo[idx];
type zt = zo[idx];
int x = floor(xt);
int y = floor(yt);
int z = floor(zt);
if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l)
{
type dx = xt - (type)x;
type dy = yt - (type)y;
type dz = zt - (type)z;
if (dx <= zero && dy <= zero && dz <= zero)
{
// exactly on a voxel: plain copy
imgo[idx] = imgr[x+y*w+z*w*h];
}
else if (dz <= zero || z >= l - 1) // same z
{
if (dy <= zero || y >= h - 1) // same y
{
// NOTE(review): reads x+1 even when x == w-1 — possible out-of-bounds
// read at the x border; confirm callers never map onto that column.
imgo[idx] = imgr[x+y*w+z*w*h]*(1-dx) + imgr[x+1+y*w+z*w*h]*(dx);
}
else if (dx <= zero || x >= w - 1) // same x
{
imgo[idx] = imgr[x+y*w+z*w*h]*(1-dy) + imgr[x+(y+1)*w+z*w*h]*(dy);
}
else
{
// compute case x & y: bilinear blend in the z-plane
type dxdy = dx*dy;
type r = imgr[x+y*w+z*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+z*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+z*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+z*w*h]*dxdy;
imgo[idx] = r;
};
}
else if (dy <= zero || y >= h - 1) // same y
{
if (dx <= zero || x >= w - 1) // same x
{
// 1-d blend along z only
imgo[idx] = imgr[x+y*w+z*w*h]*(1-dz) + imgr[x+y*w+(z+1)*w*h]*(dz);
}
else
{
// compute case x & z: bilinear blend in the y-plane
type dxdz = dx*dz;
type r = imgr[x+y*w+z*w*h]*(1-dx-dz+dxdz) + imgr[x+1+y*w+z*w*h]*(dx-dxdz) + imgr[x+y*w+(z+1)*w*h]*(dz-dxdz) + imgr[x+1+y*w+(z+1)*w*h]*dxdz;
imgo[idx] = r;
};
}
else if (dx <= zero || x >= w - 1) // same x
{
// compute case y & z: bilinear blend in the x-plane
type dydz = dy*dz;
type r = imgr[x+y*w+z*w*h]*(1-dy-dz+dydz) + imgr[x+(y+1)*w+z*w*h]*(dy-dydz) + imgr[x+y*w+(z+1)*w*h]*(dz-dydz) + imgr[x+(y+1)*w+(z+1)*w*h]*dydz;
imgo[idx] = r;
}
else
{
// compute case x & y & z: bilinear blend in the two bounding z-planes,
// then a final linear blend along z (full trilinear interpolation)
type dxdy = dx*dy;
type rv = imgr[x+y*w+z*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+z*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+z*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+z*w*h]*dxdy;
type rw = imgr[x+y*w+(z+1)*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+(z+1)*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+(z+1)*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+(z+1)*w*h]*dxdy;
type r = rv*(1-dz) + rw*dz;
imgo[idx] = r;
};
};
};
};
// template <typename type>
// __global__ void kernel_linear_interpolation_3d( const type * xo, const type * yo, const type * zo,
// const type * imgr, type * imgo,
// int w, int h, int l, //img ref width, height and length
// int n0, int n1, int n2)
// {
// int i = (blockIdx.x * blockDim.x) + threadIdx.x;
// int j = (blockIdx.y * blockDim.y) + threadIdx.y;
// int k = (blockIdx.z * blockDim.z) + threadIdx.z;
// if (i < n0 && j < n1 && k < n2)
// {
// type xt = xo[i + j*n0 + k*n0*n1];
// type yt = yo[i + j*n0 + k*n0*n1];
// type zt = zo[i + j*n0 + k*n0*n1];
// int x = floor(xt);
// int y = floor(yt);
// int z = floor(zt);
// if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l-1)
// {
// type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
// type iw[4] = {imgr[x+y*w+(z+1)*w*h], imgr[x+1+y*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h]};
// type dx = xt - (type)x;
// type dy = yt - (type)y;
// type dxdy = dx*dy;
// type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
// type rw = iw[0]*(1-dx-dy+dxdy) + iw[1]*(dx-dxdy) + iw[2]*(dy-dxdy) + iw[3]*dxdy;
// type dz = zt - (type)z;
// type r = rv*(1-dz) + rw*dz;
// imgo[i + j*n0 + k*n0*n1] = r;
// }
// else if(x >= 0 && x < w && y >= 0 && y < h && z == l-1) // border case
// {
// type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
// type dx = xt - (type)x;
// type dy = yt - (type)y;
// type dxdy = dx*dy;
// type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
// imgo[i + j*n0 + k*n0*n1] = rv;
// };
// };
// };
// template <typename type>
// __device__ void cubic(type p[4], type * x, type * out)
// {
// out[0] = p[1] + 0.5 * x[0]*(p[2] - p[0] + x[0]*(2.0*p[0] - 5.0*p[1] + 4.0*p[2] - p[3] + x[0]*(3.0*(p[1] - p[2]) + p[3] - p[0])));
// };
// 1-d cubic interpolation (Catmull-Rom / cubic-convolution form) through the
// four samples p[0..3]; x is the fractional position in [0,1] between p[1]
// and p[2]. Written in Horner form so only three multiplies by x are needed.
// NOTE(review): the 0.5/2.0/... literals are double, so for type=float the
// expression is evaluated in double and rounded back — confirm this widening
// is intended.
template <typename type>
__device__ type cubic(type p[4], type x)
{
return p[1] + 0.5 * x*(p[2] - p[0] + x*(2.0*p[0] - 5.0*p[1] + 4.0*p[2] - p[3] + x*(3.0*(p[1] - p[2]) + p[3] - p[0])));
};
// Bicubic sampling of imgr at fractional coordinates (xo, yo). The full 4x4
// cubic stencil is used only where it fits (1 <= x < w-2, 1 <= y < h-2);
// elsewhere the kernel falls back to bilinear, and to 1-d linear on the
// bottom row. Coordinates outside the reference leave imgo untouched.
template <typename type>
__global__ void kernel_cubic_interpolation_2d( const type * xo, const type * yo,
const type * imgr, type * imgo,
int w, int h, //img ref width and height
int n0, int n1) //img out dims
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < n0 && j < n1)
{
type xt = xo[i + j*n0];
type yt = yo[i + j*n0];
int x = floor(xt);
int y = floor(yt);
if(x >= 1 && x < w - 2 && y >= 1 && y < h - 2)
{
type dx = xt - (type)x;
type dy = yt - (type)y;
// gather the four rows y-1..y+2 of the 4x4 neighbourhood
type r0[4] = {imgr[x-1+(y-1)*w], imgr[x+(y-1)*w], imgr[x+1+(y-1)*w], imgr[x+2+(y-1)*w]};
type r1[4] = {imgr[x-1+(y)*w] , imgr[x+(y)*w] , imgr[x+1+(y)*w] , imgr[x+2+(y)*w]};
type r2[4] = {imgr[x-1+(y+1)*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w], imgr[x+2+(y+1)*w]};
type r3[4] = {imgr[x-1+(y+2)*w], imgr[x+(y+2)*w], imgr[x+1+(y+2)*w], imgr[x+2+(y+2)*w]};
// cubic along x per row, then a final cubic along y
type r[4] = {cubic(r0, dx), cubic(r1, dx), cubic(r2, dx), cubic(r3, dx) };
imgo[i + j*n0] = cubic(r, dy);
}
else if(x >= 0 && x < w && y >= 0 && y < h - 1)
{
// border fallback: bilinear blend of the 2x2 neighbourhood.
// NOTE(review): reads x+1 even when x == w-1 — possible out-of-bounds
// read at the right border; confirm callers never map onto that column.
// __shared__ iv[4];
type iv[4] = {imgr[x+y*w], imgr[x+1+y*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w]};
type dx = xt - (type)x;
type dy = yt - (type)y;
type dxdy = dx*dy;
type r = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
imgo[i + j*n0] = r;
}
else if(x >= 0 && x < w && y == h - 1) // border case
{
// bottom row: 1-d linear blend along x (same x+1 caveat as above)
type iv[2] = {imgr[x+y*w], imgr[x+1+y*w]};
type dx = xt - (type)x;
type r = iv[0]*(1-dx) + iv[1]*(dx);
imgo[i + j*n0] = r;
};
};
};
// Tricubic sampling of imgr at fractional coordinates (xo, yo, zo). Where the
// 4x4x4 stencil fits (indices 1..dim-3 on every axis) the kernel performs
// separable cubic interpolation: along x for each of the 16 rows, then along
// y per z-slab, then along z. Elsewhere it falls back to trilinear, and to
// bilinear on the last z-slice. Out-of-range coordinates leave imgo untouched.
// NOTE(review): the fallbacks read x+1/y+1 without clamping — possible
// out-of-bounds reads when x == w-1 or y == h-1; confirm callers avoid
// mapping onto those borders.
template <typename type>
__global__ void kernel_cubic_interpolation_3d( const type * xo, const type * yo, const type * zo,
const type * imgr, type * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int k = (blockIdx.z * blockDim.z) + threadIdx.z;
if (i < n0 && j < n1 && k < n2)
{
type xt = xo[i + j*n0 + k*n0*n1];
type yt = yo[i + j*n0 + k*n0*n1];
type zt = zo[i + j*n0 + k*n0*n1];
int x = floor(xt);
int y = floor(yt);
int z = floor(zt);
if(x >= 1 && x < w - 2 && y >= 1 && y < h - 2 && z >= 1 && z < l - 2)
{
type dx = xt - (type)x;
type dy = yt - (type)y;
type dz = zt - (type)z;
// slab z-1: cubic along x for rows y-1..y+2
type r00[4] = {imgr[x-1+(y-1)*w+(z-1)*w*h], imgr[x+(y-1)*w+(z-1)*w*h], imgr[x+1+(y-1)*w+(z-1)*w*h], imgr[x+2+(y-1)*w+(z-1)*w*h]};
type r01[4] = {imgr[x-1+(y)*w+(z-1)*w*h] , imgr[x+(y)*w+(z-1)*w*h] , imgr[x+1+(y)*w+(z-1)*w*h] , imgr[x+2+(y)*w+(z-1)*w*h]};
type r02[4] = {imgr[x-1+(y+1)*w+(z-1)*w*h], imgr[x+(y+1)*w+(z-1)*w*h], imgr[x+1+(y+1)*w+(z-1)*w*h], imgr[x+2+(y+1)*w+(z-1)*w*h]};
type r03[4] = {imgr[x-1+(y+2)*w+(z-1)*w*h], imgr[x+(y+2)*w+(z-1)*w*h], imgr[x+1+(y+2)*w+(z-1)*w*h], imgr[x+2+(y+2)*w+(z-1)*w*h]};
type rx0[4] = {cubic(r00, dx), cubic(r01, dx), cubic(r02, dx), cubic(r03, dx)};
// slab z: cubic along x for rows y-1..y+2
type r10[4] = {imgr[x-1+(y-1)*w+z*w*h], imgr[x+(y-1)*w+z*w*h], imgr[x+1+(y-1)*w+z*w*h], imgr[x+2+(y-1)*w+z*w*h]};
type r11[4] = {imgr[x-1+(y)*w+z*w*h] , imgr[x+(y)*w+z*w*h] , imgr[x+1+(y)*w+z*w*h] , imgr[x+2+(y)*w+z*w*h]};
type r12[4] = {imgr[x-1+(y+1)*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h], imgr[x+2+(y+1)*w+z*w*h]};
type r13[4] = {imgr[x-1+(y+2)*w+z*w*h], imgr[x+(y+2)*w+z*w*h], imgr[x+1+(y+2)*w+z*w*h], imgr[x+2+(y+2)*w+z*w*h]};
type rx1[4] = {cubic(r10, dx), cubic(r11, dx), cubic(r12, dx), cubic(r13, dx)};
// slab z+1: cubic along x for rows y-1..y+2
type r20[4] = {imgr[x-1+(y-1)*w+(z+1)*w*h], imgr[x+(y-1)*w+(z+1)*w*h], imgr[x+1+(y-1)*w+(z+1)*w*h], imgr[x+2+(y-1)*w+(z+1)*w*h]};
type r21[4] = {imgr[x-1+(y)*w+(z+1)*w*h] , imgr[x+(y)*w+(z+1)*w*h] , imgr[x+1+(y)*w+(z+1)*w*h] , imgr[x+2+(y)*w+(z+1)*w*h]};
type r22[4] = {imgr[x-1+(y+1)*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h], imgr[x+2+(y+1)*w+(z+1)*w*h]};
type r23[4] = {imgr[x-1+(y+2)*w+(z+1)*w*h], imgr[x+(y+2)*w+(z+1)*w*h], imgr[x+1+(y+2)*w+(z+1)*w*h], imgr[x+2+(y+2)*w+(z+1)*w*h]};
type rx2[4] = {cubic(r20, dx), cubic(r21, dx), cubic(r22, dx), cubic(r23, dx)};
// slab z+2: cubic along x for rows y-1..y+2
type r30[4] = {imgr[x-1+(y-1)*w+(z+2)*w*h], imgr[x+(y-1)*w+(z+2)*w*h], imgr[x+1+(y-1)*w+(z+2)*w*h], imgr[x+2+(y-1)*w+(z+2)*w*h]};
type r31[4] = {imgr[x-1+(y)*w+(z+2)*w*h] , imgr[x+(y)*w+(z+2)*w*h] , imgr[x+1+(y)*w+(z+2)*w*h] , imgr[x+2+(y)*w+(z+2)*w*h]};
type r32[4] = {imgr[x-1+(y+1)*w+(z+2)*w*h], imgr[x+(y+1)*w+(z+2)*w*h], imgr[x+1+(y+1)*w+(z+2)*w*h], imgr[x+2+(y+1)*w+(z+2)*w*h]};
type r33[4] = {imgr[x-1+(y+2)*w+(z+2)*w*h], imgr[x+(y+2)*w+(z+2)*w*h], imgr[x+1+(y+2)*w+(z+2)*w*h], imgr[x+2+(y+2)*w+(z+2)*w*h]};
type rx3[4] = {cubic(r30, dx), cubic(r31, dx), cubic(r32, dx), cubic(r33, dx)};
// cubic along y (one value per slab), then along z
type ry[4] = {cubic(rx0, dy), cubic(rx1, dy), cubic(rx2, dy), cubic(rx3, dy)};
imgo[i + j*n0 + k*n0*n1] = cubic(ry, dz);
}
else if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l-1)
{
// border fallback: trilinear blend (bilinear in the two bounding z-slabs,
// linear along z)
type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
type iw[4] = {imgr[x+y*w+(z+1)*w*h], imgr[x+1+y*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h]};
type dx = xt - (type)x;
type dy = yt - (type)y;
type dxdy = dx*dy;
type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
type rw = iw[0]*(1-dx-dy+dxdy) + iw[1]*(dx-dxdy) + iw[2]*(dy-dxdy) + iw[3]*dxdy;
type dz = zt - (type)z;
type r = rv*(1-dz) + rw*dz;
imgo[i + j*n0 + k*n0*n1] = r;
}
else if(x >= 0 && x < w && y >= 0 && y < h && z == l-1) // border case
{
// last z-slice: bilinear blend only
type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
type dx = xt - (type)x;
type dy = yt - (type)y;
type dxdy = dx*dy;
type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
imgo[i + j*n0 + k*n0*n1] = rv;
};
};
};
template <typename type>
__global__ void kernel_gradientx( const type * imgr, type * imgo,
                                  int n0, int n1, int n2)
{
    // Finite-difference derivative along axis 0 (fastest-varying): one-sided
    // differences at the two borders, central difference in the interior.
    // The (k == 0 || k < n2) guard lets a 2d image run with a 1-deep launch.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || (k != 0 && k >= n2)) return;
    int idx = i + j*n0 + k*n0*n1;
    if (i == 0)
        imgo[idx] = imgr[idx + 1] - imgr[idx];           // forward difference
    else if (i == n0 - 1)
        imgo[idx] = imgr[idx] - imgr[idx - 1];           // backward difference
    else
        imgo[idx] = 0.5*imgr[idx + 1] - 0.5*imgr[idx - 1]; // central difference
};
template <typename type>
__global__ void kernel_gradienty( const type * imgr, type * imgo,
                                  int n0, int n1, int n2)
{
    // Finite-difference derivative along axis 1: one-sided differences at the
    // two borders, central difference in the interior. The (k == 0 || k < n2)
    // guard lets a 2d image run with a 1-deep launch.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || (k != 0 && k >= n2)) return;
    int idx = i + j*n0 + k*n0*n1;
    if (j == 0)
        imgo[idx] = imgr[idx + n0] - imgr[idx];            // forward difference
    else if (j == n1 - 1)
        imgo[idx] = imgr[idx] - imgr[idx - n0];            // backward difference
    else
        imgo[idx] = 0.5*imgr[idx + n0] - 0.5*imgr[idx - n0]; // central difference
};
template <typename type>
__global__ void kernel_gradientz( const type * imgr, type * imgo,
                                  int n0, int n1, int n2)
{
    // Finite-difference derivative along axis 2 (slowest-varying): one-sided
    // differences at the two borders, central difference in the interior.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || k >= n2) return;
    int plane = n0*n1;
    int idx = i + j*n0 + k*plane;
    if (k == 0)
        imgo[idx] = imgr[idx + plane] - imgr[idx];               // forward difference
    else if (k == n2 - 1)
        imgo[idx] = imgr[idx] - imgr[idx - plane];               // backward difference
    else
        imgo[idx] = 0.5*imgr[idx + plane] - 0.5*imgr[idx - plane]; // central difference
};
template <typename type>
__global__ void kernel_convolution_2d( const type * imgr, const type * kern, //kernel width
                                       type * imgo, int n0, int n1, int kw0, int kw1)
{
    // 2d convolution over the interior (valid) region of imgr with a
    // kw0 x kw1 kernel; pixels whose footprint would leave the image are not
    // written.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int off0 = kw0>>1; int off1 = kw1>>1;
    if(i >= off0 && i < n0 - off0 && j >= off1 && j < n1 - off1)
    {
        type sum = 0;
        for (int p = 0; p < kw1; p++)       // kernel rows (axis 1)
        {
            for (int q = 0; q < kw0; q++)   // kernel columns (axis 0)
            {
                // BUG FIX: p and q were swapped in the image index (p paired
                // with off0/axis 0, q with off1/axis 1) while the weight index
                // kern[p*kw0 + q] used the opposite pairing — transposing the
                // kernel for square sizes and mis-indexing non-square ones.
                // Now consistent with kernel_convolution_3d.
                sum += imgr[i+q-off0 + (j+p-off1)*n0] * kern[p*kw0 + q];
            };
        };
        imgo[i + j*n0] = sum;
    };
};
template <typename type>
__global__ void kernel_convolution_3d( const type * imgr, const type * kern, //kernel width
                                       type * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2)
{
    // 3d convolution over the interior (valid) region of imgr with a
    // kw0 x kw1 x kw2 kernel; voxels whose footprint would leave the image
    // are not written.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    int off0 = kw0>>1; int off1 = kw1>>1; int off2 = kw2>>1;
    if (i < off0 || i >= n0 - off0 ||
        j < off1 || j >= n1 - off1 ||
        k < off2 || k >= n2 - off2) return;
    type sum = 0;
    for (int r = 0; r < kw2; r++)           // kernel slabs (axis 2)
        for (int p = 0; p < kw1; p++)       // kernel rows  (axis 1)
            for (int q = 0; q < kw0; q++)   // kernel cols  (axis 0)
                sum += imgr[i+q-off0 + (j+p-off1)*n0 + (k+r-off2)*n0*n1] * kern[r*kw0*kw1 + p*kw0 + q];
    imgo[i + j*n0 + k*n0*n1] = sum;
};
// ===========================================
// Kernels Calls
// ===========================================
// ===========================================
// Data Kernels
// ===========================================
template <typename type>
void cuda_kernel_assign( std::vector<int> & grid, std::vector<int> & block,
                         type * vin, type value, int n )
{
    // Fill the n-element device buffer vin with 'value' via a 1-d launch,
    // then block until the kernel has finished.
    hipLaunchKernelGGL(( kernel_assign), dim3(grid[0]),dim3(block[0]), 0, 0, vin, value, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel assign" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel assign" );
};
template <typename type>
void cuda_kernel_copy( std::vector<int> & grid, std::vector<int> & block,
                       const type * vin, type * vout, int n )
{
    // Device-to-device copy of n elements via a 1-d launch; synchronous.
    hipLaunchKernelGGL(( kernel_copy), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel copy" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel copy" );
};
template <typename typein, typename typeout>
void cuda_kernel_cast( std::vector<int> & grid, std::vector<int> & block,
                       const typein * vin, typeout * vout, int n )
{
    // Element-wise cast of n values from typein to typeout on device;
    // synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_cast <typein,typeout>), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel cast" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel cast" );
};
// ===========================================
// Vector Kernels
// ===========================================
template <typename type>
void cuda_kernel_add_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    // vout[i] = vin[i] + scalar for n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_add_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel add scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel add scalar" );
};
template <typename type>
void cuda_kernel_sub_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    // vout[i] = vin[i] - scalar for n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_sub_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel sub scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel sub scalar" );
};
template <typename type>
void cuda_kernel_sub_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n )
{
    // Reversed-operand subtraction (scalar on the left) for n elements;
    // synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_sub_scalar_inv), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel sub scalar inv" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel sub scalar inv" );
};
template <typename type>
void cuda_kernel_mul_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    // vout[i] = vin[i] * scalar for n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_mul_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel mul scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel mul scalar" );
};
template <typename type>
void cuda_kernel_div_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    // vout[i] = vin[i] / scalar for n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_div_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel div scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel div scalar" );
};
template <typename type>
void cuda_kernel_div_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n )
{
    // Reversed-operand division (scalar on the left) for n elements;
    // synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_div_scalar_inv), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel div scalar inv" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel div scalar inv" );
};
template <typename type>
void cuda_kernel_pow_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    // Element-wise power with a scalar exponent for n elements; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_pow_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel pow scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel pow scalar" );
};
template <typename type>
void cuda_kernel_pow_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n )
{
    // Reversed-operand power (scalar as the base) for n elements; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_pow_scalar_inv), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel pow scalar inv" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel pow scalar inv" );
};
template <typename type>
void cuda_kernel_add( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    // Element-wise vector addition of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_add), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel add" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel add" );
};
template <typename type>
void cuda_kernel_sub( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    // Element-wise vector subtraction of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_sub), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel sub" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel sub" );
};
template <typename type>
void cuda_kernel_mul( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    // Element-wise vector multiplication of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_mul), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel mul" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel mul" );
};
template <typename type>
void cuda_kernel_div( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    // Element-wise vector division of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_div), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel div" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel div" );
};
template <typename type>
void cuda_kernel_pow( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    // Element-wise vector power of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_pow), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel pow" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel pow" );
};
template <typename type>
void cuda_kernel_equal( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n)
{
    // Element-wise equality comparison of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_equal), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel equal" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel equal" );
};
template <typename type>
void cuda_kernel_greater( std::vector<int> & grid, std::vector<int> & block,
                          const type * vin1, const type * vin2, type * vout, int n)
{
    // Element-wise greater-than comparison of n elements; synchronous 1-d
    // launch.
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    hipLaunchKernelGGL(( kernel_greater), dim3(grd),dim3(blk), 0, 0, vin1, vin2, vout, n);
    // FIX: name the kernel in the diagnostics, consistent with every sibling
    // wrapper (was the generic "Fail to run kernel" / "Fail to sync kernel").
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel greater" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel greater" );
};
template <typename type>
void cuda_kernel_less( std::vector<int> & grid, std::vector<int> & block,
                       const type * vin1, const type * vin2, type * vout, int n)
{
    // Element-wise less-than comparison of n elements; synchronous 1-d launch.
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    hipLaunchKernelGGL(( kernel_less), dim3(grd),dim3(blk), 0, 0, vin1, vin2, vout, n);
    // FIX: name the kernel in the diagnostics, consistent with every sibling
    // wrapper (was the generic "Fail to run kernel" / "Fail to sync kernel").
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel less" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel less" );
};
template <typename type>
void cuda_kernel_greater_equal( std::vector<int> & grid, std::vector<int> & block,
                                const type * vin1, const type * vin2, type * vout, int n)
{
    // Element-wise >= comparison of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_greater_equal), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel greater equal" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel greater equal" );
};
template <typename type>
void cuda_kernel_less_equal( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin1, const type * vin2, type * vout, int n)
{
    // Element-wise <= comparison of n elements; synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_less_equal), dim3(grid[0]),dim3(block[0]), 0, 0, vin1, vin2, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel less equal" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel less equal" );
};
template <typename type>
void cuda_kernel_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                               const type * vin, type * vout, type scalar, int n)
{
    // Element-wise equality against a scalar for n elements; synchronous 1-d
    // launch.
    hipLaunchKernelGGL(( kernel_equal_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel equal scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel equal scalar" );
};
template <typename type>
void cuda_kernel_greater_scalar( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n)
{
    // Element-wise > comparison against a scalar for n elements; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_greater_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel greater scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel greater scalar" );
};
template <typename type>
void cuda_kernel_less_scalar( std::vector<int> & grid, std::vector<int> & block,
                              const type * vin, type * vout, type scalar, int n)
{
    // Element-wise < comparison against a scalar for n elements; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_less_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel less scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel less scalar" );
};
template <typename type>
void cuda_kernel_greater_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                                       const type * vin, type * vout, type scalar, int n)
{
    // Element-wise >= comparison against a scalar for n elements; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_greater_equal_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel greater equal scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel greater equal scalar" );
};
template <typename type>
void cuda_kernel_less_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                                    const type * vin, type * vout, type scalar, int n)
{
    // Element-wise <= comparison against a scalar for n elements; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_less_equal_scalar), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, scalar, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel less equal scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel less equal scalar" );
};
template <typename type>
void cuda_kernel_replace( std::vector<int> & grid, std::vector<int> & block,
                          const type * idxs, const type * vin, type * vout, int n)
{
    // Conditional replace driven by the idxs mask over n elements;
    // synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_replace), dim3(grid[0]),dim3(block[0]), 0, 0, idxs, vin, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel replace" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel replace" );
};
template <typename type>
void cuda_kernel_replace_scalar( std::vector<int> & grid, std::vector<int> & block,
                                 const type * idxs, type * vout, type value, int n)
{
    // Conditional scalar replace driven by the idxs mask over n elements;
    // synchronous 1-d launch.
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    hipLaunchKernelGGL(( kernel_replace_scalar), dim3(grd),dim3(blk), 0, 0, idxs, vout, value, n);
    // FIX: diagnostics previously said "kernel replace", indistinguishable
    // from cuda_kernel_replace's messages; name this wrapper's kernel.
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel replace scalar" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel replace scalar" );
};
// ===========================================
// Reduction Kernels
// ===========================================
template <typename type>
void cuda_kernel_sum( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin, type * vout, int n)
{
    // Reduction step: sum n input elements into vout (one partial per block,
    // per the kernel's reduction scheme); synchronous 1-d launch.
    hipLaunchKernelGGL(( kernel_sum), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel sum" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel sum" );
};
template <typename type>
void cuda_kernel_min( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin, type * vout, int n)
{
    // Reduction step: minimum of n input elements into vout; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_min), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel min" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel min" );
};
template <typename type>
void cuda_kernel_max( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin, type * vout, int n)
{
    // Reduction step: maximum of n input elements into vout; synchronous
    // 1-d launch.
    hipLaunchKernelGGL(( kernel_max), dim3(grid[0]),dim3(block[0]), 0, 0, vin, vout, n);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel max" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel max" );
};
// ===========================================
// Image Kernels
// ===========================================
template <typename type>
void cuda_kernel_pad_2d( std::vector<int> & grid, std::vector<int> & block,
                         const type * vin, type * vout, int start0, int start1,
                         int end0, int end1, int n0, int n1 )
{
    // Copy a 2d image into a padded buffer at offset (start0, start1);
    // synchronous 2-d launch.
    hipLaunchKernelGGL(( kernel_pad_2d), dim3(grid[0],grid[1]),dim3(block[0],block[1]), 0, 0, vin, vout, start0, start1, end0, end1, n0, n1);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel pad 2d" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel pad 2d" );
};
template <typename type>
void cuda_kernel_unpad_2d( std::vector<int> & grid, std::vector<int> & block,
                           const type * vin, type * vout, int start0, int start1,
                           int end0, int end1, int n0, int n1 )
{
    // Extract the (start, end) region of a padded 2d image; synchronous 2-d
    // launch.
    hipLaunchKernelGGL(( kernel_unpad_2d), dim3(grid[0],grid[1]),dim3(block[0],block[1]), 0, 0, vin, vout, start0, start1, end0, end1, n0, n1);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel unpad 2d" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel unpad 2d" );
};
template <typename type>
void cuda_kernel_pad_3d( std::vector<int> & grid, std::vector<int> & block,
                         const type * vin, type * vout, int start0, int start1, int start2,
                         int end0, int end1, int end2, int n0, int n1, int n2 )
{
    // Copy a 3d image into a padded buffer at offset (start0, start1, start2);
    // synchronous 3-d launch.
    hipLaunchKernelGGL(( kernel_pad_3d), dim3(grid[0],grid[1],grid[2]),dim3(block[0],block[1],block[2]), 0, 0, vin, vout, start0, start1, start2, end0, end1, end2, n0, n1, n2);
    imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel pad 3d" );
    imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel pad 3d" );
};
template <typename type>
void cuda_kernel_unpad_3d( std::vector<int> & grid, std::vector<int> & block,
const type * vin, type * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2 )
{
dim3 grd(grid[0],grid[1],grid[2]);
dim3 blk(block[0],block[1],block[2]);
hipLaunchKernelGGL(( kernel_unpad_3d), dim3(grd),dim3(blk), 0, 0, vin, vout, start0, start1, start2, end0, end1, end2, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel unpad 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel unpad 3d" );
};
template <typename type>
void cuda_kernel_grid_2d( std::vector<int> & grid, std::vector<int> & block,
type * x, type * y, double * sod,
int n0, int n1)
{
dim3 grd(grid[0],grid[1]);
dim3 blk(block[0],block[1]);
hipLaunchKernelGGL(( kernel_grid_2d), dim3(grd),dim3(blk), 0, 0, x, y, sod, n0, n1);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel grid 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel grid 2d" );
};
template <typename type>
void cuda_kernel_grid_3d( std::vector<int> & grid, std::vector<int> & block,
type * x, type * y, type * z, double * sod,
int n0, int n1, int n2)
{
dim3 grd(grid[0],grid[1],grid[2]);
dim3 blk(block[0],block[1],block[2]);
hipLaunchKernelGGL(( kernel_grid_3d), dim3(grd),dim3(blk), 0, 0, x, y, z, sod, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel grid 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel grid 3d" );
};
template <typename type>
void cuda_kernel_affine_2d( std::vector<int> & grid, std::vector<int> & block,
const type * xin, const type * yin,
type * xout, type * yout,
const type * param, int n)
{
// printf("kernel affine 2d init\n");
// printf("block: [%i, %i, %i]\n", block[0], block[1] , block[2]);
// printf("grid: [%i, %i, %i]\n", grid[0], grid[1] , grid[2]);
dim3 grd(grid[0]);
dim3 blk(block[0]);
hipLaunchKernelGGL(( kernel_affine_2d), dim3(grd),dim3(blk), 0, 0, xin, yin, xout, yout, param, n);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel affine 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel affine 2d" );
};
template <typename type>
void cuda_kernel_affine_3d( std::vector<int> & grid, std::vector<int> & block,
const type * xin, const type * yin, const type * zin,
type * xout, type * yout, type * zout,
const type * param, int n)
{
dim3 grd(grid[0]);
dim3 blk(block[0]);
hipLaunchKernelGGL(( kernel_affine_3d), dim3(grd),dim3(blk), 0, 0, xin, yin, zin, xout, yout, zout, param, n);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel affine 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel affine 3d" );
};
template <typename type>
void cuda_kernel_affine_sod_2d( std::vector<int> & grid, std::vector<int> & block,
const type * xin, const type * yin,
type * xout, type * yout,
const double * sod, int n)
{
dim3 grd(grid[0]);
dim3 blk(block[0]);
hipLaunchKernelGGL(( kernel_affine_sod_2d), dim3(grd),dim3(blk), 0, 0, xin, yin, xout, yout, sod, n);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel affine sod 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel affine sod 2d" );
};
template <typename type>
void cuda_kernel_affine_sod_3d( std::vector<int> & grid, std::vector<int> & block,
const type * xin, const type * yin, const type * zin,
type * xout, type * yout, type * zout,
const double * sod, int n)
{
dim3 grd(grid[0]);
dim3 blk(block[0]);
hipLaunchKernelGGL(( kernel_affine_sod_3d), dim3(grd),dim3(blk), 0, 0, xin, yin, zin, xout, yout, zout, sod, n);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel affine sod 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel affine sod 3d" );
};
template <typename type>
void cuda_kernel_dfield_2d( std::vector<int> & grid, std::vector<int> & block,
const type * xin, const type * yin, // grid coordinates
const type * x, const type * y, // vector field
type * xout, type * yout, int n)
{
dim3 grd(grid[0]);
dim3 blk(block[0]);
hipLaunchKernelGGL(( kernel_dfield_2d), dim3(grd),dim3(blk), 0, 0, xin, yin, x, y, xout, yout, n);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel dfield 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel dfield 2d" );
};
template <typename type>
void cuda_kernel_dfield_3d( std::vector<int> & grid, std::vector<int> & block,
const type * xin, const type * yin, const type * zin, // grid coordinates
const type * x, const type * y, const type * z, // vector field
type * xout, type * yout, type * zout, // output coordinates
int n)
{
dim3 grd(grid[0]);
dim3 blk(block[0]);
hipLaunchKernelGGL(( kernel_dfield_3d), dim3(grd),dim3(blk), 0, 0, xin, yin, zin, x, y, z, xout, yout, zout, n);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel dfield 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel dfield 3d" );
};
template <typename type>
void cuda_kernel_nearest_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
const type * xo, const type * yo,
const type * imgr, type * imgo,
int w, int h, //img ref width and height
int n0, int n1) //img out dims
{
dim3 grd(grid[0],grid[1]);
dim3 blk(block[0],block[1]);
hipLaunchKernelGGL(( kernel_nearest_interpolation_2d), dim3(grd),dim3(blk), 0, 0, xo, yo, imgr, imgo, w, h, n0, n1);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel nearest interpolation 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel nearest interpolation 2d" );
};
template <typename type>
void cuda_kernel_nearest_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
const type * xo, const type * yo, const type * zo,
const type * imgr, type * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2)
{
dim3 grd(grid[0],grid[1],grid[2]);
dim3 blk(block[0],block[1],block[2]);
hipLaunchKernelGGL(( kernel_nearest_interpolation_3d), dim3(grd),dim3(blk), 0, 0, xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel nearest interpolation 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel nearest interpolation 3d" );
};
template <typename type>
void cuda_kernel_linear_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
const type * xo, const type * yo,
const type * imgr, type * imgo,
int w, int h, //img ref width and height
int n0, int n1) //img out dims
{
dim3 grd(grid[0],grid[1]);
dim3 blk(block[0],block[1]);
hipLaunchKernelGGL(( kernel_linear_interpolation_2d), dim3(grd),dim3(blk), 0, 0, xo, yo, imgr, imgo, w, h, n0, n1);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel linear interpolation 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel linear interpolation 2d" );
};
template <typename type>
void cuda_kernel_linear_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
const type * xo, const type * yo, const type * zo,
const type * imgr, type * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2)
{
dim3 grd(grid[0],grid[1],grid[2]);
dim3 blk(block[0],block[1],block[2]);
hipLaunchKernelGGL(( kernel_linear_interpolation_3d), dim3(grd),dim3(blk), 0, 0, xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel linear interpolation 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel linear interpolation 3d" );
};
template <typename type>
void cuda_kernel_cubic_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
const type * xo, const type * yo,
const type * imgr, type * imgo,
int w, int h, //img ref width and height
int n0, int n1) //img out dims
{
dim3 grd(grid[0],grid[1]);
dim3 blk(block[0],block[1]);
hipLaunchKernelGGL(( kernel_cubic_interpolation_2d), dim3(grd),dim3(blk), 0, 0, xo, yo, imgr, imgo, w, h, n0, n1);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel cubic interpolation 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel cubic interpolation 2d" );
};
template <typename type>
void cuda_kernel_cubic_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
const type * xo, const type * yo, const type * zo,
const type * imgr, type * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2)
{
dim3 grd(grid[0],grid[1],grid[2]);
dim3 blk(block[0],block[1],block[2]);
hipLaunchKernelGGL(( kernel_cubic_interpolation_3d), dim3(grd),dim3(blk), 0, 0, xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel cubic interpolation 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel cubic interpolation 3d" );
};
template <typename type>
void cuda_kernel_gradientx( std::vector<int> & grid, std::vector<int> & block,
const type * imgr, type * imgo,
int n0, int n1, int n2)
{
dim3 grd;
dim3 blk;
if (block[2] == 0)
{
grd = dim3(grid[0],grid[1]);
blk = dim3(block[0],block[1]);
}
else
{
grd = dim3(grid[0],grid[1],grid[2]);
blk = dim3(block[0],block[1],block[2]);
};
hipLaunchKernelGGL(( kernel_gradientx), dim3(grd),dim3(blk), 0, 0, imgr, imgo, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel gradient x" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel gradient x" );
};
template <typename type>
void cuda_kernel_gradienty( std::vector<int> & grid, std::vector<int> & block,
const type * imgr, type * imgo,
int n0, int n1, int n2)
{
dim3 grd;
dim3 blk;
if (block[2] == 0)
{
grd = dim3(grid[0],grid[1]);
blk = dim3(block[0],block[1]);
}
else
{
grd = dim3(grid[0],grid[1],grid[2]);
blk = dim3(block[0],block[1],block[2]);
};
hipLaunchKernelGGL(( kernel_gradienty), dim3(grd),dim3(blk), 0, 0, imgr, imgo, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel gradient y" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel gradient y" );
};
template <typename type>
void cuda_kernel_gradientz( std::vector<int> & grid, std::vector<int> & block,
const type * imgr, type * imgo,
int n0, int n1, int n2)
{
dim3 grd(grid[0],grid[1],grid[2]);
dim3 blk(block[0],block[1],block[2]);
hipLaunchKernelGGL(( kernel_gradientz), dim3(grd),dim3(blk), 0, 0, imgr, imgo, n0, n1, n2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel gradient z" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel gradient z" );
};
template <typename type>
void cuda_kernel_convolution_2d( std::vector<int> & grid, std::vector<int> & block,
const type * imgr, const type * kern, //kernel width
type * imgo, int n0, int n1, int kw0, int kw1)
{
dim3 grd(grid[0],grid[1]);
dim3 blk(block[0],block[1]);
hipLaunchKernelGGL(( kernel_convolution_2d), dim3(grd),dim3(blk), 0, 0, imgr, kern, imgo, n0, n1, kw0, kw1);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel convolution 2d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel convolution 2d" );
};
template <typename type>
void cuda_kernel_convolution_3d( std::vector<int> & grid, std::vector<int> & block,
const type * imgr, const type * kern, //kernel width
type * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2)
{
dim3 grd(grid[0],grid[1],grid[2]);
dim3 blk(block[0],block[1],block[2]);
hipLaunchKernelGGL(( kernel_convolution_3d), dim3(grd),dim3(blk), 0, 0, imgr, kern, imgo, n0, n1, n2, kw0, kw1, kw2);
imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel convolution 3d" );
imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel convolution 3d" );
};
// Generic fallback: the 2d FFT is only implemented by the float/double
// specializations below; all other element types are a deliberate no-op.
template <typename type>
void cuda_kernel_fft_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * in_real, const type * in_img,
    type * out_real, type * out_img, int n0, int n1, bool forward )
{
    // intentionally empty
};
// specialization
// Single-precision 2d complex-to-complex FFT via hipFFT.
// The split real/imaginary device inputs are interleaved into a temporary
// hipfftComplex buffer, transformed, and de-interleaved into the outputs.
// NOTE(review): grid/block are unused here; hipFFT chooses its own launch
// configuration internally.
template <> void cuda_kernel_fft_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * in_real, const float * in_img,
    float * out_real, float * out_img, int n0, int n1, bool forward )
{
    int N = n0*n1;
    hipfftComplex *in = nullptr;
    hipfftComplex *out = nullptr;
    // fix: allocations were unchecked; a failed hipMalloc would hand hipFFT an
    // invalid pointer and surface later as a confusing copy/exec error
    imart_assert_kernel( hipMalloc(&in, N*sizeof(hipfftComplex)), "Fail to allocate fft 2d input buffer" );
    imart_assert_kernel( hipMalloc(&out, N*sizeof(hipfftComplex)), "Fail to allocate fft 2d output buffer" );
    float * tmpi = (float *) in;
    // COPY in_real and in_img to in: reals land on even float slots, imaginaries on odd
    imart_assert_kernel ( hipMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
        in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( hipMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
        in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    hipfftHandle p_fft;
    // NOTE(review): hipfft* return hipfftResult, not hipError_t, so their
    // statuses are not routed through imart_assert_kernel — TODO add a
    // dedicated assert. Plan dims are (n1, n0); presumably n1 is the slowest
    // varying axis — confirm against the image memory layout.
    hipfftPlan2d(&p_fft, n1, n0, HIPFFT_C2C);
    if (forward)
        hipfftExecC2C(p_fft, (hipfftComplex *)in, (hipfftComplex *)out, HIPFFT_FORWARD);
    else
        hipfftExecC2C(p_fft, (hipfftComplex *)in, (hipfftComplex *)out, HIPFFT_BACKWARD);
    float * tmpo = (float *) out;
    // COPY out to out_real and out_img (inverse of the interleave above)
    imart_assert_kernel ( hipMemcpy2D(out_real, 1 * sizeof(out_real[0]),
        tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // fix: message previously said "complex to real" for the imaginary part
    imart_assert_kernel ( hipMemcpy2D(out_img, 1 * sizeof(out_img[0]),
        tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    hipfftDestroy(p_fft);
    // fix: check frees as well so leaks/invalid pointers are reported
    imart_assert_kernel( hipFree(in), "Fail to free fft 2d input buffer" );
    imart_assert_kernel( hipFree(out), "Fail to free fft 2d output buffer" );
};
// specialization
// Double-precision 2d complex-to-complex FFT via hipFFT (Z2Z path).
// Mirrors the float specialization: interleave split inputs, transform,
// de-interleave. NOTE(review): grid/block are unused by hipFFT.
template <> void cuda_kernel_fft_2d<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * in_real, const double * in_img,
    double * out_real, double * out_img, int n0, int n1, bool forward )
{
    int N = n0*n1;
    hipfftDoubleComplex *in = nullptr;
    hipfftDoubleComplex *out = nullptr;
    // fix: allocations were unchecked; a failed hipMalloc would hand hipFFT an
    // invalid pointer and surface later as a confusing copy/exec error
    imart_assert_kernel( hipMalloc(&in, N*sizeof(hipfftDoubleComplex)), "Fail to allocate fft 2d input buffer" );
    imart_assert_kernel( hipMalloc(&out, N*sizeof(hipfftDoubleComplex)), "Fail to allocate fft 2d output buffer" );
    double * tmpi = (double *) in;
    // COPY in_real and in_img to in: reals land on even slots, imaginaries on odd
    imart_assert_kernel ( hipMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
        in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( hipMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
        in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    hipfftHandle p_fft;
    // NOTE(review): hipfft* return hipfftResult, not hipError_t, so their
    // statuses are not routed through imart_assert_kernel — TODO add a
    // dedicated assert. Plan dims are (n1, n0); presumably n1 is the slowest
    // varying axis — confirm against the image memory layout.
    hipfftPlan2d(&p_fft, n1, n0, HIPFFT_C2C);
    if (forward)
        hipfftExecZ2Z(p_fft, (hipfftDoubleComplex *)in, (hipfftDoubleComplex *)out, HIPFFT_FORWARD);
    else
        hipfftExecZ2Z(p_fft, (hipfftDoubleComplex *)in, (hipfftDoubleComplex *)out, HIPFFT_BACKWARD);
    double * tmpo = (double *) out;
    // COPY out to out_real and out_img (inverse of the interleave above)
    imart_assert_kernel ( hipMemcpy2D(out_real, 1 * sizeof(out_real[0]),
        tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // fix: message previously said "complex to real" for the imaginary part
    imart_assert_kernel ( hipMemcpy2D(out_img, 1 * sizeof(out_img[0]),
        tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    hipfftDestroy(p_fft);
    // fix: check frees as well so leaks/invalid pointers are reported
    imart_assert_kernel( hipFree(in), "Fail to free fft 2d input buffer" );
    imart_assert_kernel( hipFree(out), "Fail to free fft 2d output buffer" );
};
// Generic fallback: the 3d FFT is only implemented by the float/double
// specializations below; all other element types are a deliberate no-op.
template <typename type>
void cuda_kernel_fft_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * in_real, const type * in_img,
    type * out_real, type * out_img, int n0, int n1, int n2, bool forward )
{
    // intentionally empty
};
// specialization
// Single-precision 3d complex-to-complex FFT via hipFFT.
// Interleaves split real/imaginary inputs, transforms, de-interleaves.
// NOTE(review): grid/block are unused; hipFFT picks its own launch config.
template <> void cuda_kernel_fft_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * in_real, const float * in_img,
    float * out_real, float * out_img, int n0, int n1, int n2, bool forward )
{
    int N = n0*n1*n2;
    hipfftComplex *in = nullptr;
    hipfftComplex *out = nullptr;
    // fix: allocations were unchecked; a failed hipMalloc would hand hipFFT an
    // invalid pointer and surface later as a confusing copy/exec error
    imart_assert_kernel( hipMalloc(&in, N*sizeof(hipfftComplex)), "Fail to allocate fft 3d input buffer" );
    imart_assert_kernel( hipMalloc(&out, N*sizeof(hipfftComplex)), "Fail to allocate fft 3d output buffer" );
    float * tmpi = (float *) in;
    // COPY in_real and in_img to in: reals land on even float slots, imaginaries on odd
    imart_assert_kernel ( hipMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
        in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( hipMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
        in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    hipfftHandle p_fft;
    // NOTE(review): hipfft* return hipfftResult, not hipError_t, so their
    // statuses are not routed through imart_assert_kernel — TODO add a
    // dedicated assert. Plan dims are (n2, n1, n0): slowest dimension first.
    hipfftPlan3d(&p_fft, n2, n1, n0, HIPFFT_C2C);
    if (forward)
        hipfftExecC2C(p_fft, (hipfftComplex *)in, (hipfftComplex *)out, HIPFFT_FORWARD);
    else
        hipfftExecC2C(p_fft, (hipfftComplex *)in, (hipfftComplex *)out, HIPFFT_BACKWARD);
    float * tmpo = (float *) out;
    // COPY out to out_real and out_img (inverse of the interleave above)
    imart_assert_kernel ( hipMemcpy2D(out_real, 1 * sizeof(out_real[0]),
        tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // fix: message previously said "complex to real" for the imaginary part
    imart_assert_kernel ( hipMemcpy2D(out_img, 1 * sizeof(out_img[0]),
        tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    hipfftDestroy(p_fft);
    // fix: check frees as well so leaks/invalid pointers are reported
    imart_assert_kernel( hipFree(in), "Fail to free fft 3d input buffer" );
    imart_assert_kernel( hipFree(out), "Fail to free fft 3d output buffer" );
};
// specialization
// Double-precision 3d complex-to-complex FFT via hipFFT (Z2Z path).
// Mirrors the float specialization: interleave split inputs, transform,
// de-interleave. NOTE(review): grid/block are unused by hipFFT.
template <> void cuda_kernel_fft_3d<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * in_real, const double * in_img,
    double * out_real, double * out_img, int n0, int n1, int n2, bool forward )
{
    int N = n0*n1*n2;
    hipfftDoubleComplex *in = nullptr;
    hipfftDoubleComplex *out = nullptr;
    // fix: allocations were unchecked; a failed hipMalloc would hand hipFFT an
    // invalid pointer and surface later as a confusing copy/exec error
    imart_assert_kernel( hipMalloc(&in, N*sizeof(hipfftDoubleComplex)), "Fail to allocate fft 3d input buffer" );
    imart_assert_kernel( hipMalloc(&out, N*sizeof(hipfftDoubleComplex)), "Fail to allocate fft 3d output buffer" );
    double * tmpi = (double *) in;
    // COPY in_real and in_img to in: reals land on even slots, imaginaries on odd
    imart_assert_kernel ( hipMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
        in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( hipMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
        in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    hipfftHandle p_fft;
    // NOTE(review): hipfft* return hipfftResult, not hipError_t, so their
    // statuses are not routed through imart_assert_kernel — TODO add a
    // dedicated assert. Plan dims are (n2, n1, n0): slowest dimension first.
    hipfftPlan3d(&p_fft, n2, n1, n0, HIPFFT_C2C);
    if (forward)
        hipfftExecZ2Z(p_fft, (hipfftDoubleComplex *)in, (hipfftDoubleComplex *)out, HIPFFT_FORWARD);
    else
        hipfftExecZ2Z(p_fft, (hipfftDoubleComplex *)in, (hipfftDoubleComplex *)out, HIPFFT_BACKWARD);
    double * tmpo = (double *) out;
    // COPY out to out_real and out_img (inverse of the interleave above)
    imart_assert_kernel ( hipMemcpy2D(out_real, 1 * sizeof(out_real[0]),
        tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // fix: message previously said "complex to real" for the imaginary part
    imart_assert_kernel ( hipMemcpy2D(out_img, 1 * sizeof(out_img[0]),
        tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
        N, hipMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    hipfftDestroy(p_fft);
    // fix: check frees as well so leaks/invalid pointers are reported
    imart_assert_kernel( hipFree(in), "Fail to free fft 3d input buffer" );
    imart_assert_kernel( hipFree(out), "Fail to free fft 3d output buffer" );
};
// template <typename type>
// void cuda_kernel_( std::vector<int> & grid, std::vector<int> & block,
// )
// {
// dim3 grd(grid[0],grid[1],grid[2]);
// dim3 blk(block[0],block[1],block[2]);
// kernel_<<<grd,blk>>>();
// imart_assert_kernel( hipPeekAtLastError(), "Fail to run kernel" );
// imart_assert_kernel( hipDeviceSynchronize(), "Fail to sync kernel" );
// };
// ===========================================
// Explicit instantiation
// ===========================================
// CASTINGS
// Cast instantiations: every supported (source, destination) element-type pair.
template void cuda_kernel_cast <float,double>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, double * vout, int n );
template void cuda_kernel_cast <double,float>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, float * vout, int n );
template void cuda_kernel_cast <int,float>( std::vector<int> & grid, std::vector<int> & block,
    const int * vin, float * vout, int n );
template void cuda_kernel_cast <float,int>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, int * vout, int n );
template void cuda_kernel_cast <int,double>( std::vector<int> & grid, std::vector<int> & block,
    const int * vin, double * vout, int n );
template void cuda_kernel_cast <double,int>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, int * vout, int n );
template void cuda_kernel_cast <float,unsigned short>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, unsigned short * vout, int n );
template void cuda_kernel_cast <unsigned short,float>( std::vector<int> & grid, std::vector<int> & block,
    const unsigned short * vin, float * vout, int n );
template void cuda_kernel_cast <double,unsigned short>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, unsigned short * vout, int n );
template void cuda_kernel_cast <unsigned short,double>( std::vector<int> & grid, std::vector<int> & block,
    const unsigned short * vin, double * vout, int n );
template void cuda_kernel_cast <float,unsigned int>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, unsigned int * vout, int n );
template void cuda_kernel_cast <unsigned int,float>( std::vector<int> & grid, std::vector<int> & block,
    const unsigned int * vin, float * vout, int n );
template void cuda_kernel_cast <double,unsigned int>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, unsigned int * vout, int n );
template void cuda_kernel_cast <unsigned int,double>( std::vector<int> & grid, std::vector<int> & block,
    const unsigned int * vin, double * vout, int n );
template void cuda_kernel_cast <float,unsigned char>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, unsigned char * vout, int n );
template void cuda_kernel_cast <unsigned char,float>( std::vector<int> & grid, std::vector<int> & block,
    const unsigned char * vin, float * vout, int n );
template void cuda_kernel_cast <double,unsigned char>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, unsigned char * vout, int n );
template void cuda_kernel_cast <unsigned char,double>( std::vector<int> & grid, std::vector<int> & block,
    const unsigned char * vin, double * vout, int n );
template void cuda_kernel_cast <float,float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, int n );
template void cuda_kernel_cast <double,double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, int n );
// float instantiations of the element-wise, reduction, image and interpolation wrappers.
template void cuda_kernel_assign<float>( std::vector<int> & grid, std::vector<int> & block,
    float * vin, float value, int n );
template void cuda_kernel_copy<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, int n );
template void cuda_kernel_add<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_sub<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_mul<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_div<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_pow<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_add_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_sub_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_sub_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_mul_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_div_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_div_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_pow_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_pow_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_equal<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_greater<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_less<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_greater_equal<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_less_equal<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_greater_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_less_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_greater_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_less_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_replace<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * idxs, const float * vin, float * vout, int n);
template void cuda_kernel_replace_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * idxs, float * vout, float value, int n);
template void cuda_kernel_sum<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_min<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_max<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_pad_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, int start0, int start1,
    int end0, int end1, int n0, int n1 );
template void cuda_kernel_unpad_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, int start0, int start1,
    int end0, int end1, int n0, int n1);
template void cuda_kernel_pad_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, int start0, int start1, int start2,
    int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_unpad_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * vin, float * vout, int start0, int start1, int start2,
    int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_grid_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    float * x, float * y, double * sod,
    int n0, int n1);
template void cuda_kernel_grid_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    float * x, float * y, float * z, double * sod,
    int n0, int n1, int n2);
template void cuda_kernel_affine_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xin, const float * yin,
    float * xout, float * yout,
    const float * param, int n);
template void cuda_kernel_affine_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xin, const float * yin, const float * zin,
    float * xout, float * yout, float * zout,
    const float * param, int n) ;
template void cuda_kernel_affine_sod_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xin, const float * yin,
    float * xout, float * yout,
    const double * sod, int n);
template void cuda_kernel_affine_sod_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xin, const float * yin, const float * zin,
    float * xout, float * yout, float * zout,
    const double * sod, int n);
template void cuda_kernel_dfield_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xin, const float * yin, // grid coordinates
    const float * x, const float * y, // vector field
    float * xout, float * yout, int n);
template void cuda_kernel_dfield_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xin, const float * yin, const float * zin, // grid coordinates
    const float * x, const float * y, const float * z, // vector field
    float * xout, float * yout, float * zout, int n);
template void cuda_kernel_nearest_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xo, const float * yo,
    const float * imgr, float * imgo,
    int w, int h, //img ref width and height
    int n0, int n1); //img out dims
template void cuda_kernel_nearest_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xo, const float * yo, const float * zo,
    const float * imgr, float * imgo,
    int w, int h, int l, //img ref width, height and length
    int n0, int n1, int n2 );
template void cuda_kernel_linear_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xo, const float * yo,
    const float * imgr, float * imgo,
    int w, int h, //img ref width and height
    int n0, int n1); //img out dims
template void cuda_kernel_linear_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xo, const float * yo, const float * zo,
    const float * imgr, float * imgo,
    int w, int h, int l, //img ref width, height and length
    int n0, int n1, int n2);
template void cuda_kernel_cubic_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * xo, const float * yo,
    const float * imgr, float * imgo,
    int w, int h, //img ref width and height
    int n0, int n1); //img out dims
// NOTE(review): cubic 3d float instantiation intentionally disabled below.
// template void cuda_kernel_cubic_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block,
//                     const float * xo, const float * yo, const float * zo,
//                     const float * imgr, float * imgo,
//                     int w, int h, int l,    //img ref width, height and length
//                     int n0, int n1, int n2);
template void cuda_kernel_gradientx<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * imgr, float * imgo,
    int n0, int n1, int n2);
template void cuda_kernel_gradienty<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * imgr, float * imgo,
    int n0, int n1, int n2);
template void cuda_kernel_gradientz<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * imgr, float * imgo,
    int n0, int n1, int n2);
template void cuda_kernel_convolution_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * imgr, const float * kern, //kernel width
    float * imgo, int n0, int n1, int kw0, int kw1);
template void cuda_kernel_convolution_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * imgr, const float * kern, //kernel width
    float * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2);
// double instantiations of the element-wise wrappers.
template void cuda_kernel_assign<double>( std::vector<int> & grid, std::vector<int> & block,
    double * vin, double value, int n );
template void cuda_kernel_copy<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, int n );
template void cuda_kernel_add<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_sub<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_mul<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_div<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_pow<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_add_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_sub_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_sub_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_mul_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_div_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_div_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_pow_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_pow_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_equal<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_greater<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_less<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_greater_equal<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_less_equal<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_greater_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_less_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_greater_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_less_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_replace<double>( std::vector<int> & grid, std::vector<int> & block,
const double * idxs, const double * vin, double * vout, int n);
template void cuda_kernel_replace_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * idxs, double * vout, double value, int n);
template void cuda_kernel_sum<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
template void cuda_kernel_min<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
template void cuda_kernel_max<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
template void cuda_kernel_pad_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1,
int end0, int end1, int n0, int n1 );
template void cuda_kernel_unpad_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1,
int end0, int end1, int n0, int n1);
template void cuda_kernel_pad_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_unpad_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_grid_2d<double>( std::vector<int> & grid, std::vector<int> & block,
double * x, double * y, double * sod,
int n0, int n1);
template void cuda_kernel_grid_3d<double>( std::vector<int> & grid, std::vector<int> & block,
double * x, double * y, double * z, double * sod,
int n0, int n1, int n2);
template void cuda_kernel_affine_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin,
double * xout, double * yout,
const double * param, int n);
template void cuda_kernel_affine_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, const double * zin,
double * xout, double * yout, double * zout,
const double * param, int n) ;
template void cuda_kernel_affine_sod_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin,
double * xout, double * yout,
const double * sod, int n);
template void cuda_kernel_affine_sod_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, const double * zin,
double * xout, double * yout, double * zout,
const double * sod, int n);
template void cuda_kernel_dfield_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, // grid coordinates
const double * x, const double * y, // vector field
double * xout, double * yout, int n);
template void cuda_kernel_dfield_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, const double * zin, // grid coordinates
const double * x, const double * y, const double * z, // vector field
double * xout, double * yout, double * zout, int n);
template void cuda_kernel_nearest_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo,
const double * imgr, double * imgo,
int w, int h, //img ref width and height
int n0, int n1); //img out dims
template void cuda_kernel_nearest_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo, const double * zo,
const double * imgr, double * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2);
template void cuda_kernel_linear_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo,
const double * imgr, double * imgo,
int w, int h, //img ref width and height
int n0, int n1); //img out dims
template void cuda_kernel_linear_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo, const double * zo,
const double * imgr, double * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2);
template void cuda_kernel_cubic_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo,
const double * imgr, double * imgo,
int w, int h, //img ref width and height
int n0, int n1); //img out dims
template void cuda_kernel_cubic_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo, const double * zo,
const double * imgr, double * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2);
template void cuda_kernel_gradientx<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, double * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradienty<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, double * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradientz<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, double * imgo,
int n0, int n1, int n2);
template void cuda_kernel_convolution_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, const double * kern, //kernel width
double * imgo, int n0, int n1, int kw0, int kw1);
template void cuda_kernel_convolution_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, const double * kern, //kernel width
double * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2);
template void cuda_kernel_assign<int>( std::vector<int> & grid, std::vector<int> & block,
int * vin, int value, int n );
template void cuda_kernel_copy<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int n );
template void cuda_kernel_add<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_sub<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_mul<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_div<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_pow<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_add_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_sub_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_sub_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_mul_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_div_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_div_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_pow_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_pow_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_equal<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_greater<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_less<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_greater_equal<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_less_equal<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_greater_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_less_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_greater_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_less_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_replace<int>( std::vector<int> & grid, std::vector<int> & block,
const int * idxs, const int * vin, int * vout, int n);
template void cuda_kernel_replace_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * idxs, int * vout, int value, int n);
template void cuda_kernel_sum<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n );
template void cuda_kernel_min<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n );
template void cuda_kernel_max<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n );
template void cuda_kernel_pad_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1,
int end0, int end1, int n0, int n1 );
template void cuda_kernel_unpad_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1,
int end0, int end1, int n0, int n1 );
template void cuda_kernel_pad_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2 );
template void cuda_kernel_unpad_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2 );
template void cuda_kernel_grid_2d<int>( std::vector<int> & grid, std::vector<int> & block,
int * x, int * y, double * sod,
int n0, int n1 );
template void cuda_kernel_grid_3d<int>( std::vector<int> & grid, std::vector<int> & block,
int * x, int * y, int * z, double * sod,
int n0, int n1, int n2 );
template void cuda_kernel_affine_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin,
int * xout, int * yout,
const int * param, int n );
template void cuda_kernel_affine_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, const int * zin,
int * xout, int * yout, int * zout,
const int * param, int n );
template void cuda_kernel_affine_sod_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin,
int * xout, int * yout,
const double * sod, int n);
template void cuda_kernel_affine_sod_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, const int * zin,
int * xout, int * yout, int * zout,
const double * sod, int n );
template void cuda_kernel_dfield_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, // grid coordinates
const int * x, const int * y, // vector field
int * xout, int * yout, int n );
template void cuda_kernel_dfield_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, const int * zin, // grid coordinates
const int * x, const int * y, const int * z, // vector field
int * xout, int * yout, int * zout, int n );
// template void cuda_kernel_nearest_interpolation_2d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo,
// const int * imgr, int * imgo,
// int w, int h, //img ref width and height
// int n0, int n1); //img out dims
// template void cuda_kernel_nearest_interpolation_3d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo, const int * zo,
// const int * imgr, int * imgo,
// int w, int h, int l, //img ref width, height and length
// int n0, int n1, int n2);
// template void cuda_kernel_linear_interpolation_2d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo,
// const int * imgr, int * imgo,
// int w, int h, //img ref width and height
// int n0, int n1); //img out dims
// template void cuda_kernel_linear_interpolation_3d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo, const int * zo,
// const int * imgr, int * imgo,
// int w, int h, int l, //img ref width, height and length
// int n0, int n1, int n2);
template void cuda_kernel_gradientx<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, int * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradienty<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, int * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradientz<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, int * imgo,
int n0, int n1, int n2);
template void cuda_kernel_convolution_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, const int * kern, //kernel width
int * imgo, int n0, int n1, int kw0, int kw1);
template void cuda_kernel_convolution_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, const int * kern, //kernel width
int * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2);
template void cuda_kernel_assign<unsigned short>( std::vector<int> & grid, std::vector<int> & block,
unsigned short * vin, unsigned short value, int n );
template void cuda_kernel_copy<unsigned short>( std::vector<int> & grid, std::vector<int> & block,
const unsigned short * vin, unsigned short * vout, int n );
template void cuda_kernel_assign<unsigned int>( std::vector<int> & grid, std::vector<int> & block,
unsigned int * vin, unsigned int value, int n );
template void cuda_kernel_copy<unsigned int>( std::vector<int> & grid, std::vector<int> & block,
const unsigned int * vin, unsigned int * vout, int n );
template void cuda_kernel_assign<unsigned char>( std::vector<int> & grid, std::vector<int> & block,
unsigned char * vin, unsigned char value, int n );
template void cuda_kernel_copy<unsigned char>( std::vector<int> & grid, std::vector<int> & block,
const unsigned char * vin, unsigned char * vout, int n );
template void cuda_kernel_assign<short>( std::vector<int> & grid, std::vector<int> & block,
short * vin, short value, int n );
template void cuda_kernel_copy<short>( std::vector<int> & grid, std::vector<int> & block,
const short * vin, short * vout, int n );
template void cuda_kernel_assign<char>( std::vector<int> & grid, std::vector<int> & block,
char * vin, char value, int n );
template void cuda_kernel_copy<char>( std::vector<int> & grid, std::vector<int> & block,
const char * vin, char * vout, int n );
| bc671647953be6b44f15b34af8d43c9bf5bf08ca.cu | /*
* @Author: jose
* @Date: 2020-08-24 00:00:00
* @Last Modified by: jose
* @Last Modified time: 2020-08-24 00:00:00
*/
// local libs
#include "kernels.cuh"
#include <cufft.h>
// ===========================================
// Check Errors
// ===========================================
#define imart_assert_kernel(status, msg) \
imart_assert_kernel_error((status), __FILE__, __LINE__, msg);
void imart_assert_kernel_error(cudaError_t code, const char *file, int line, const char* msg, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"\n******* CUDA Error *******"\
"\n[Error] Information:\t%s"\
"\n[Error] Error code:\t%i"\
"\n[Error] Description:\t%s"\
"\n[Error] File:\t\t%s"\
"\n[Error] Line:\t\t%d\n",
msg, code, cudaGetErrorString(code), file, line);
if (abort) exit(code);
};
};
// ===========================================
// Kernels
// ===========================================
// ===========================================
// Data Kernels
// ===========================================
template <typename type>
__global__ void kernel_assign(type * vin, type value, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vin[i] = value;
};
template <typename type>
__global__ void kernel_copy(const type * vin, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin[i];
};
template <typename typein, typename typeout>
__global__ void kernel_cast(const typein * vin, typeout * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = typeout(vin[i]);
};
// ===========================================
// Vector Kernels
// ===========================================
template <typename type>
__global__ void kernel_add_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin[i] + scalar;
};
template <typename type>
__global__ void kernel_sub_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin[i] - scalar;
};
template <typename type>
__global__ void kernel_sub_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = scalar - vin[i];
};
template <typename type>
__global__ void kernel_mul_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin[i] * scalar;
};
template <typename type>
__global__ void kernel_div_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin[i] / scalar;
};
template <typename type>
__global__ void kernel_div_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = scalar / vin[i];
};
template <typename type>
__global__ void kernel_pow_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = pow( vin[i], scalar );
};
template <typename type>
__global__ void kernel_pow_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = pow( scalar, vin[i] );
};
template <typename type>
__global__ void kernel_add(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin1[i] + vin2[i];
};
template <typename type>
__global__ void kernel_sub(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin1[i] - vin2[i];
};
template <typename type>
__global__ void kernel_mul(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin1[i] * vin2[i];
};
template <typename type>
__global__ void kernel_div(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin1[i] / vin2[i];
};
template <typename type>
__global__ void kernel_pow(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = pow( vin1[i], vin2[i] );
};
template <typename type>
__global__ void kernel_equal(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin1[i] == vin2[i]);
};
template <typename type>
__global__ void kernel_greater(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin1[i] > vin2[i]);
};
template <typename type>
__global__ void kernel_less(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin1[i] < vin2[i]);
};
template <typename type>
__global__ void kernel_greater_equal(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin1[i] >= vin2[i];
};
template <typename type>
__global__ void kernel_less_equal(const type * vin1, const type * vin2, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = vin1[i] <= vin2[i];
};
template <typename type>
__global__ void kernel_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin[i] == scalar);
};
template <typename type>
__global__ void kernel_greater_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin[i] > scalar);
};
template <typename type>
__global__ void kernel_less_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin[i] < scalar);
};
template <typename type>
__global__ void kernel_greater_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin[i] >= scalar);
};
template <typename type>
__global__ void kernel_less_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) vout[i] = (vin[i] <= scalar);
};
template <typename type>
__global__ void kernel_replace(const type * idxs, const type * vin, type * vout, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
if (idxs[i]) vout[i] = vin[i];
};
};
template <typename type>
__global__ void kernel_replace_scalar(const type * idxs, type * vout, type value, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
if (idxs[i]) vout[i] = value;
};
};
// ===========================================
// Reduction Kernels
// ===========================================
template <typename type>
__global__ void kernel_sum(const type *vin, type *vout, int n)
{
__shared__ type sdata[256]; // Warning, threads should be 256
unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
type sum = 0;
for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
{
sum += vin[i];
};
sdata[tid] = sum;
__syncthreads();
for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
};
__syncthreads();
};
if (tid == 0) vout[blockIdx.x] = sdata[0];
};
template <typename type>
__global__ void kernel_min(const type *vin, type *vout, int n)
{
__shared__ type sdata[256];
unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
type thread_result = vin[0];
for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
{
type tmp = vin[i];
thread_result = thread_result < tmp ? thread_result : tmp;
};
sdata[tid] = thread_result;
__syncthreads();
for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = sdata[tid] < sdata[tid + s]? sdata[tid] : sdata[tid + s];
};
__syncthreads();
};
if (tid == 0) vout[blockIdx.x] = sdata[0];
};
template <typename type>
__global__ void kernel_max(const type *vin, type *vout, int n)
{
__shared__ type sdata[256];
unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
type thread_result = vin[0];
for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
{
type tmp = vin[i];
thread_result = thread_result > tmp ? thread_result : tmp;
};
sdata[tid] = thread_result;
__syncthreads();
for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = sdata[tid] > sdata[tid + s]? sdata[tid] : sdata[tid + s];
};
__syncthreads();
};
if (tid == 0) vout[blockIdx.x] = sdata[0];
};
// ===========================================
// Image Kernels
// ===========================================
template <typename type>
__global__ void kernel_pad_2d(const type * vin, type * vout, int start0, int start1,
int end0, int end1, int n0, int n1)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int wo = n0+start0+end0;
if (i < n0 && j < n1) // width = n0, heigth = n1
{
vout[start0+i + (start1+j)*wo] = vin[i + j*n0];
};
};
template <typename type>
__global__ void kernel_unpad_2d(const type * vin, type * vout, int start0, int start1,
int end0, int end1, int n0, int n1)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int wo = n0+start0+end0;
if (i < n0 && j < n1) // width = n0, heigth = n1
{
vout[i + j*n0] = vin[start0+i + (start1+j)*wo];
};
};
template <typename type>
__global__ void kernel_pad_3d(const type * vin, type * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2)
{
// int blockIdx_z = __float2int_rd(blockIdx.y * invBlocksInY);
// int blockIdx_y = blockIdx.y - (blockIdx_z * blocksInY);
// int i = (blockIdx.x * blockDim.x) + threadIdx.x;
// int j = (blockIdx_y * blockDim.y) + threadIdx.y;
// int k = (blockIdx_z * blockDim.z) + threadIdx.z;
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int k = (blockIdx.z * blockDim.z) + threadIdx.z;
int wo = n0+start0+end0; //vout size
int ho = n1+start1+end1; //vout size
if (i < n0 && j < n1 && k < n2) // width = n0, height = n1, depth = n2
{
vout[start0+i + (start1+j)*wo + (start2+k)*wo*ho] = vin[i + j*n0 + k*n0*n1];
};
};
template <typename type>
__global__ void kernel_unpad_3d(const type * vin, type * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int k = (blockIdx.z * blockDim.z) + threadIdx.z;
int wo = n0+start0+end0; //vout size
int ho = n1+start1+end1; //vout size
if (i < n0 && j < n1 && k < n2) // width = n0, height = n1, depth = n2
{
vout[i + j*n0 + k*n0*n1] = vin[start0+i + (start1+j)*wo + (start2+k)*wo*ho];
};
};
template <typename type>
__global__ void kernel_grid_2d( type * x, type * y, double * sod,
int n0, int n1)
{
// consider sod conversion to float to support all gpu
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
double c0 = sod[0]; double c1 = sod[1];
double o0 = sod[2]; double o1 = sod[3];
double d0 = sod[4]; double d1 = sod[5];
double d2 = sod[6]; double d3 = sod[7];
if (i < n0 && j < n1) // width = n0, heigth = n1
{
x[i+j*n0] = (type)(d0*c0*i + d1*c1*j + o0);
y[i+j*n0] = (type)(d2*c0*i + d3*c1*j + o1);
};
};
template <typename type>
__global__ void kernel_grid_3d( type * x, type * y, type * z, double * sod,
                                int n0, int n1, int n2)
{
    // Generate physical coordinates for a regular 3d grid of size n0 x n1 x n2.
    // sod layout: [scale(3), origin(3), direction(3x3, row major)]. The math
    // is done in double (consider a float conversion to support all gpus).
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || k >= n2) return;
    double s0 = sod[0], s1 = sod[1], s2 = sod[2];        // per-axis scaling
    double o0 = sod[3], o1 = sod[4], o2 = sod[5];        // origin
    double d00 = sod[6],  d01 = sod[7],  d02 = sod[8];   // direction rows
    double d10 = sod[9],  d11 = sod[10], d12 = sod[11];
    double d20 = sod[12], d21 = sod[13], d22 = sod[14];
    int idx = i + j*n0 + k*n0*n1;
    x[idx] = (type)(d00*s0*i + d01*s1*j + d02*s2*k + o0);
    y[idx] = (type)(d10*s0*i + d11*s1*j + d12*s2*k + o1);
    z[idx] = (type)(d20*s0*i + d21*s1*j + d22*s2*k + o2);
};
template <typename type>
__global__ void kernel_affine_2d( const type * xin, const type * yin,
                                    type * xout, type * yout,
                                    const type * param, int n)
{
    // Apply a 2d affine transform to a flat list of n coordinates.
    // param = [a00, a01, a10, a11, t0, t1]: out = A * in + t.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    type px = xin[i];
    type py = yin[i];
    xout[i] = (type)(param[0]*px + param[1]*py + param[4]);
    yout[i] = (type)(param[2]*px + param[3]*py + param[5]);
};
template <typename type>
__global__ void kernel_affine_3d( const type * xin, const type * yin, const type * zin,
                                    type * xout, type * yout, type * zout,
                                    const type * param, int n)
{
    // Apply a 3d affine transform to a flat list of n coordinates.
    // param = [a00..a22 (row major 3x3), t0, t1, t2]: out = A * in + t.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    type px = xin[i];
    type py = yin[i];
    type pz = zin[i];
    xout[i] = (type)(param[0]*px + param[1]*py + param[2]*pz + param[9]);
    yout[i] = (type)(param[3]*px + param[4]*py + param[5]*pz + param[10]);
    zout[i] = (type)(param[6]*px + param[7]*py + param[8]*pz + param[11]);
};
template <typename type>
__global__ void kernel_affine_sod_2d( const type * xin, const type * yin,
                                    type * xout, type * yout,
                                    const double * sod, int n)
{
    // Transform n coordinates by the scale/origin/direction parameters.
    // sod = [s0, s1, o0, o1, d00, d01, d10, d11]; computed in double
    // (consider a float conversion of sod to support all gpus).
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    double s0 = sod[0], s1 = sod[1];  // per-axis scaling
    double o0 = sod[2], o1 = sod[3];  // origin
    xout[i] = (type)(sod[4]*s0*xin[i] + sod[5]*s1*yin[i] + o0);
    yout[i] = (type)(sod[6]*s0*xin[i] + sod[7]*s1*yin[i] + o1);
};
template <typename type>
__global__ void kernel_affine_sod_3d( const type * xin, const type * yin, const type * zin,
                                    type * xout, type * yout, type * zout,
                                    const double * sod, int n)
{
    // Transform n coordinates by the scale/origin/direction parameters.
    // sod = [scale(3), origin(3), direction(3x3 row major)]; computed in
    // double (consider a float conversion of sod to support all gpus).
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    double s0 = sod[0], s1 = sod[1], s2 = sod[2];  // per-axis scaling
    double o0 = sod[3], o1 = sod[4], o2 = sod[5];  // origin
    xout[i] = (type)(sod[6]*s0*xin[i] + sod[7]*s1*yin[i] + sod[8]*s2*zin[i] + o0);
    yout[i] = (type)(sod[9]*s0*xin[i] + sod[10]*s1*yin[i] + sod[11]*s2*zin[i] + o1);
    zout[i] = (type)(sod[12]*s0*xin[i] + sod[13]*s1*yin[i] + sod[14]*s2*zin[i] + o2);
};
template <typename type>
__global__ void kernel_dfield_2d( const type * xin, const type * yin, // grid coordinates
                                    const type * x, const type * y,   // vector field
                                    type * xout, type * yout, int n)
{
    // Displace 2d grid coordinates by a vector field: out = grid + field.
    // All buffers are flat arrays of equal length n.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    xout[i] = xin[i] + x[i];
    yout[i] = yin[i] + y[i];
};
template <typename type>
__global__ void kernel_dfield_3d( const type * xin, const type * yin, const type * zin, // grid coordinates
                                    const type * x, const type * y, const type * z,     // vector field
                                    type * xout, type * yout, type * zout,              // output coordinates
                                    int n)
{
    // Displace 3d grid coordinates by a vector field: out = grid + field.
    // All buffers are flat arrays of equal length n.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    xout[i] = xin[i] + x[i];
    yout[i] = yin[i] + y[i];
    zout[i] = zin[i] + z[i];
};
template <typename type>
__global__ void kernel_nearest_interpolation_2d( const type * xo, const type * yo,
                                    const type * imgr, type * imgo,
                                    int w, int h, //img ref width and height
                                    int n0, int n1) //img out dims
{
    // Nearest-neighbor sampling: round the (xo, yo) coordinate of each output
    // pixel and copy the closest reference pixel. Coordinates outside the
    // reference image leave the output pixel untouched.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= n0 || j >= n1) return;
    int idx = i + j*n0;
    int x = round(xo[idx]);
    int y = round(yo[idx]);
    if (x >= 0 && x < w && y >= 0 && y < h)
        imgo[idx] = imgr[x + y*w];
};
template <typename type>
__global__ void kernel_nearest_interpolation_3d( const type * xo, const type * yo, const type * zo,
                                    const type * imgr, type * imgo,
                                    int w, int h, int l, //img ref width, height and length
                                    int n0, int n1, int n2)
{
    // Nearest-neighbor sampling in 3d: round the (xo, yo, zo) coordinate of
    // each output voxel and copy the closest reference voxel. Coordinates
    // outside the reference volume leave the output voxel untouched.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && k < n2)
    {
        int idx = i + j*n0 + k*n0*n1;
        int x = round(xo[idx]);
        int y = round(yo[idx]);
        int z = round(zo[idx]); // BUG FIX: previously read yo here, so the z coordinate was wrong
        if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l)
        {
            imgo[idx] = imgr[x + y*w + z*w*h];
        };
    };
};
// Bilinear interpolation of imgr (w x h) at fractional coordinates (xo, yo),
// writing one output pixel per thread into imgo (n0 x n1). Fractional parts
// below the 'zero' threshold (0.01) collapse to the lower sample, which also
// avoids reading past the image on the top-left-aligned degenerate branches.
// Out-of-range coordinates leave the output pixel untouched.
template <typename type>
__global__ void kernel_linear_interpolation_2d( const type * xo, const type * yo,
                                    const type * imgr, type * imgo,
                                    int w, int h, //img ref width and height
                                    int n0, int n1) //img out dims
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < n0 && j < n1)
    {
        type zero = 0.01;           // tolerance below which a fraction counts as exactly on a sample
        int idx = i + j*n0;
        type xt = xo[idx];          // sampling coordinate in reference image space
        type yt = yo[idx];
        int x = floor(xt);          // integer base sample
        int y = floor(yt);
        if(x >= 0 && x < w && y >= 0 && y < h)
        {
            type dx = xt - (type)x; // fractional offsets in [0, 1)
            type dy = yt - (type)y;
            if (dx < zero && dy < zero)
            {
                // Exactly on a sample: plain copy.
                imgo[idx] = imgr[x+y*w];
            }
            else if (dy < zero || y >= h - 1) // same y
            {
                // 1d interpolation along x only.
                // NOTE(review): reads x+1 — can go one past the row when x == w-1
                // and dx >= zero; confirm callers never produce that case.
                imgo[idx] = imgr[x+y*w]*(1-dx) + imgr[x+1+y*w]*(dx);
            }
            else if (dx < zero || x >= w - 1) // same x
            {
                // 1d interpolation along y only.
                imgo[idx] = imgr[x+y*w]*(1-dy) + imgr[x+(y+1)*w]*(dy);
            }
            else
            {
                // compute case x & y
                // Full bilinear blend of the 2x2 neighborhood.
                type dxdy = dx*dy;
                type r = imgr[x+y*w]*(1-dx-dy+dxdy) + imgr[x+1+y*w]*(dx-dxdy) + imgr[x+(y+1)*w]*(dy-dxdy) + imgr[x+1+(y+1)*w]*dxdy;
                imgo[idx] = r;
            };
        };
    };
};
// template <typename type>
// __global__ void kernel_linear_interpolation_2d( const type * xo, const type * yo,
// const type * imgr, type * imgo,
// int w, int h, //img ref width and height
// int n0, int n1) //img out dims
// {
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// int j = blockDim.y * blockIdx.y + threadIdx.y;
// if (i < n0 && j < n1)
// {
// type xt = xo[i + j*n0];
// type yt = yo[i + j*n0];
// int x = floor(xt);
// int y = floor(yt);
// if(x >= 0 && x < w && y >= 0 && y < h - 1)
// {
// // __shared__ iv[4];
// type iv[4] = {imgr[x+y*w], imgr[x+1+y*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w]};
// type dx = xt - (type)x;
// type dy = yt - (type)y;
// type dxdy = dx*dy;
// type r = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
// imgo[i + j*n0] = r;
// }
// else if(x >= 0 && x < w && y == h - 1) // border case
// {
// type iv[2] = {imgr[x+y*w], imgr[x+1+y*w]};
// type dx = xt - (type)x;
// type r = iv[0]*(1-dx) + iv[1]*(dx);
// imgo[i + j*n0] = r;
// };
// };
// };
// Trilinear interpolation of imgr (w x h x l) at fractional coordinates
// (xo, yo, zo), one output voxel per thread into imgo (n0 x n1 x n2).
// The case tree degrades to bilinear/linear/copy when a fractional part is
// below the 'zero' threshold (0.01) or the base sample sits on the far border,
// so the kernel never reads past the volume on that axis in those branches.
// Out-of-range coordinates leave the output voxel untouched.
template <typename type>
__global__ void kernel_linear_interpolation_3d( const type * xo, const type * yo, const type * zo,
                                    const type * imgr, type * imgo,
                                    int w, int h, int l, //img ref width, height and length
                                    int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && k < n2)
    {
        type zero = 0.01;           // tolerance below which a fraction counts as exactly on a sample
        int idx = i + j*n0 + k*n0*n1;
        type xt = xo[idx];          // sampling coordinate in reference volume space
        type yt = yo[idx];
        type zt = zo[idx];
        int x = floor(xt);          // integer base sample
        int y = floor(yt);
        int z = floor(zt);
        if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l)
        {
            type dx = xt - (type)x; // fractional offsets in [0, 1)
            type dy = yt - (type)y;
            type dz = zt - (type)z;
            if (dx <= zero && dy <= zero && dz <= zero)
            {
                // Exactly on a sample: plain copy.
                imgo[idx] = imgr[x+y*w+z*w*h];
            }
            else if (dz <= zero || z >= l - 1) // same z
            {
                // z collapsed: reduces to a 2d (bilinear) problem in the z slice.
                if (dy <= zero || y >= h - 1) // same y
                {
                    // NOTE(review): reads x+1 — can go one past the row when
                    // x == w-1 and dx > zero; confirm callers never hit that case.
                    imgo[idx] = imgr[x+y*w+z*w*h]*(1-dx) + imgr[x+1+y*w+z*w*h]*(dx);
                }
                else if (dx <= zero || x >= w - 1) // same x
                {
                    imgo[idx] = imgr[x+y*w+z*w*h]*(1-dy) + imgr[x+(y+1)*w+z*w*h]*(dy);
                }
                else
                {
                    // compute case x & y
                    type dxdy = dx*dy;
                    type r = imgr[x+y*w+z*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+z*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+z*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+z*w*h]*dxdy;
                    imgo[idx] = r;
                };
            }
            else if (dy <= zero || y >= h - 1) // same y
            {
                // y collapsed: interpolate in the x-z plane.
                if (dx <= zero || x >= w - 1) // same x
                {
                    imgo[idx] = imgr[x+y*w+z*w*h]*(1-dz) + imgr[x+y*w+(z+1)*w*h]*(dz);
                }
                else
                {
                    // compute case x & z
                    type dxdz = dx*dz;
                    type r = imgr[x+y*w+z*w*h]*(1-dx-dz+dxdz) + imgr[x+1+y*w+z*w*h]*(dx-dxdz) + imgr[x+y*w+(z+1)*w*h]*(dz-dxdz) + imgr[x+1+y*w+(z+1)*w*h]*dxdz;
                    imgo[idx] = r;
                };
            }
            else if (dx <= zero || x >= w - 1) // same x
            {
                // compute case y & z
                // x collapsed: bilinear blend in the y-z plane.
                type dydz = dy*dz;
                type r = imgr[x+y*w+z*w*h]*(1-dy-dz+dydz) + imgr[x+(y+1)*w+z*w*h]*(dy-dydz) + imgr[x+y*w+(z+1)*w*h]*(dz-dydz) + imgr[x+(y+1)*w+(z+1)*w*h]*dydz;
                imgo[idx] = r;
            }
            else
            {
                // compute case x & y & z
                // Full trilinear: bilinear blend on the two z slices, then linear in z.
                type dxdy = dx*dy;
                type rv = imgr[x+y*w+z*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+z*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+z*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+z*w*h]*dxdy;
                type rw = imgr[x+y*w+(z+1)*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+(z+1)*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+(z+1)*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+(z+1)*w*h]*dxdy;
                type r = rv*(1-dz) + rw*dz;
                imgo[idx] = r;
            };
        };
    };
};
// template <typename type>
// __global__ void kernel_linear_interpolation_3d( const type * xo, const type * yo, const type * zo,
// const type * imgr, type * imgo,
// int w, int h, int l, //img ref width, height and length
// int n0, int n1, int n2)
// {
// int i = (blockIdx.x * blockDim.x) + threadIdx.x;
// int j = (blockIdx.y * blockDim.y) + threadIdx.y;
// int k = (blockIdx.z * blockDim.z) + threadIdx.z;
// if (i < n0 && j < n1 && k < n2)
// {
// type xt = xo[i + j*n0 + k*n0*n1];
// type yt = yo[i + j*n0 + k*n0*n1];
// type zt = zo[i + j*n0 + k*n0*n1];
// int x = floor(xt);
// int y = floor(yt);
// int z = floor(zt);
// if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l-1)
// {
// type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
// type iw[4] = {imgr[x+y*w+(z+1)*w*h], imgr[x+1+y*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h]};
// type dx = xt - (type)x;
// type dy = yt - (type)y;
// type dxdy = dx*dy;
// type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
// type rw = iw[0]*(1-dx-dy+dxdy) + iw[1]*(dx-dxdy) + iw[2]*(dy-dxdy) + iw[3]*dxdy;
// type dz = zt - (type)z;
// type r = rv*(1-dz) + rw*dz;
// imgo[i + j*n0 + k*n0*n1] = r;
// }
// else if(x >= 0 && x < w && y >= 0 && y < h && z == l-1) // border case
// {
// type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
// type dx = xt - (type)x;
// type dy = yt - (type)y;
// type dxdy = dx*dy;
// type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
// imgo[i + j*n0 + k*n0*n1] = rv;
// };
// };
// };
// template <typename type>
// __device__ void cubic(type p[4], type * x, type * out)
// {
// out[0] = p[1] + 0.5 * x[0]*(p[2] - p[0] + x[0]*(2.0*p[0] - 5.0*p[1] + 4.0*p[2] - p[3] + x[0]*(3.0*(p[1] - p[2]) + p[3] - p[0])));
// };
// 1d cubic interpolation (Catmull-Rom form, Horner evaluation) of the four
// samples p[0..3] at fractional position x in [0,1] between p[1] and p[2].
// Note: the 0.5/2.0/... literals are double, so intermediate math is promoted
// to double even for type == float.
template <typename type>
__device__ type cubic(type p[4], type x)
{
    return p[1] + 0.5 * x*(p[2] - p[0] + x*(2.0*p[0] - 5.0*p[1] + 4.0*p[2] - p[3] + x*(3.0*(p[1] - p[2]) + p[3] - p[0])));
};
// Bicubic interpolation of imgr (w x h) at fractional coordinates (xo, yo),
// one output pixel per thread into imgo (n0 x n1). The 4x4 cubic stencil is
// only used when fully inside the image; near the borders it falls back to
// bilinear, and on the last row to 1d linear interpolation.
template <typename type>
__global__ void kernel_cubic_interpolation_2d( const type * xo, const type * yo,
                                    const type * imgr, type * imgo,
                                    int w, int h, //img ref width and height
                                    int n0, int n1) //img out dims
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < n0 && j < n1)
    {
        type xt = xo[i + j*n0];     // sampling coordinate in reference image space
        type yt = yo[i + j*n0];
        int x = floor(xt);          // integer base sample
        int y = floor(yt);
        if(x >= 1 && x < w - 2 && y >= 1 && y < h - 2)
        {
            // Interior: cubic along x on four rows, then cubic along y.
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type r0[4] = {imgr[x-1+(y-1)*w], imgr[x+(y-1)*w], imgr[x+1+(y-1)*w], imgr[x+2+(y-1)*w]};
            type r1[4] = {imgr[x-1+(y)*w]  , imgr[x+(y)*w]  , imgr[x+1+(y)*w]  , imgr[x+2+(y)*w]};
            type r2[4] = {imgr[x-1+(y+1)*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w], imgr[x+2+(y+1)*w]};
            type r3[4] = {imgr[x-1+(y+2)*w], imgr[x+(y+2)*w], imgr[x+1+(y+2)*w], imgr[x+2+(y+2)*w]};
            type r[4] = {cubic(r0, dx), cubic(r1, dx), cubic(r2, dx), cubic(r3, dx) };
            imgo[i + j*n0] = cubic(r, dy);
        }
        else if(x >= 0 && x < w && y >= 0 && y < h - 1)
        {
            // Border fallback: bilinear blend of the 2x2 neighborhood.
            // NOTE(review): reads x+1 — can go one past the row when x == w-1;
            // confirm callers never produce that case.
            // __shared__ iv[4];
            type iv[4] = {imgr[x+y*w], imgr[x+1+y*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w]};
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dxdy = dx*dy;
            type r = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
            imgo[i + j*n0] = r;
        }
        else if(x >= 0 && x < w && y == h - 1) // border case
        {
            // Last row: 1d linear interpolation along x only.
            type iv[2] = {imgr[x+y*w], imgr[x+1+y*w]};
            type dx = xt - (type)x;
            type r = iv[0]*(1-dx) + iv[1]*(dx);
            imgo[i + j*n0] = r;
        };
    };
};
// Tricubic interpolation of imgr (w x h x l) at fractional coordinates
// (xo, yo, zo), one output voxel per thread into imgo (n0 x n1 x n2).
// The 4x4x4 cubic stencil is only used when fully inside the volume; near
// the borders it falls back to trilinear, and on the last z slice to
// bilinear interpolation in that slice.
template <typename type>
__global__ void kernel_cubic_interpolation_3d( const type * xo, const type * yo, const type * zo,
                                    const type * imgr, type * imgo,
                                    int w, int h, int l, //img ref width, height and length
                                    int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && k < n2)
    {
        type xt = xo[i + j*n0 + k*n0*n1];   // sampling coordinate in reference volume space
        type yt = yo[i + j*n0 + k*n0*n1];
        type zt = zo[i + j*n0 + k*n0*n1];
        int x = floor(xt);                  // integer base sample
        int y = floor(yt);
        int z = floor(zt);
        if(x >= 1 && x < w - 2 && y >= 1 && y < h - 2 && z >= 1 && z < l - 2)
        {
            // Interior: cubic along x (16 rows), then y (4 columns), then z.
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dz = zt - (type)z;
            type r00[4] = {imgr[x-1+(y-1)*w+(z-1)*w*h], imgr[x+(y-1)*w+(z-1)*w*h], imgr[x+1+(y-1)*w+(z-1)*w*h], imgr[x+2+(y-1)*w+(z-1)*w*h]};
            type r01[4] = {imgr[x-1+(y)*w+(z-1)*w*h]  , imgr[x+(y)*w+(z-1)*w*h]  , imgr[x+1+(y)*w+(z-1)*w*h]  , imgr[x+2+(y)*w+(z-1)*w*h]};
            type r02[4] = {imgr[x-1+(y+1)*w+(z-1)*w*h], imgr[x+(y+1)*w+(z-1)*w*h], imgr[x+1+(y+1)*w+(z-1)*w*h], imgr[x+2+(y+1)*w+(z-1)*w*h]};
            type r03[4] = {imgr[x-1+(y+2)*w+(z-1)*w*h], imgr[x+(y+2)*w+(z-1)*w*h], imgr[x+1+(y+2)*w+(z-1)*w*h], imgr[x+2+(y+2)*w+(z-1)*w*h]};
            type rx0[4] = {cubic(r00, dx), cubic(r01, dx), cubic(r02, dx), cubic(r03, dx)};
            type r10[4] = {imgr[x-1+(y-1)*w+z*w*h], imgr[x+(y-1)*w+z*w*h], imgr[x+1+(y-1)*w+z*w*h], imgr[x+2+(y-1)*w+z*w*h]};
            type r11[4] = {imgr[x-1+(y)*w+z*w*h]  , imgr[x+(y)*w+z*w*h]  , imgr[x+1+(y)*w+z*w*h]  , imgr[x+2+(y)*w+z*w*h]};
            type r12[4] = {imgr[x-1+(y+1)*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h], imgr[x+2+(y+1)*w+z*w*h]};
            type r13[4] = {imgr[x-1+(y+2)*w+z*w*h], imgr[x+(y+2)*w+z*w*h], imgr[x+1+(y+2)*w+z*w*h], imgr[x+2+(y+2)*w+z*w*h]};
            type rx1[4] = {cubic(r10, dx), cubic(r11, dx), cubic(r12, dx), cubic(r13, dx)};
            type r20[4] = {imgr[x-1+(y-1)*w+(z+1)*w*h], imgr[x+(y-1)*w+(z+1)*w*h], imgr[x+1+(y-1)*w+(z+1)*w*h], imgr[x+2+(y-1)*w+(z+1)*w*h]};
            type r21[4] = {imgr[x-1+(y)*w+(z+1)*w*h]  , imgr[x+(y)*w+(z+1)*w*h]  , imgr[x+1+(y)*w+(z+1)*w*h]  , imgr[x+2+(y)*w+(z+1)*w*h]};
            type r22[4] = {imgr[x-1+(y+1)*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h], imgr[x+2+(y+1)*w+(z+1)*w*h]};
            type r23[4] = {imgr[x-1+(y+2)*w+(z+1)*w*h], imgr[x+(y+2)*w+(z+1)*w*h], imgr[x+1+(y+2)*w+(z+1)*w*h], imgr[x+2+(y+2)*w+(z+1)*w*h]};
            type rx2[4] = {cubic(r20, dx), cubic(r21, dx), cubic(r22, dx), cubic(r23, dx)};
            type r30[4] = {imgr[x-1+(y-1)*w+(z+2)*w*h], imgr[x+(y-1)*w+(z+2)*w*h], imgr[x+1+(y-1)*w+(z+2)*w*h], imgr[x+2+(y-1)*w+(z+2)*w*h]};
            type r31[4] = {imgr[x-1+(y)*w+(z+2)*w*h]  , imgr[x+(y)*w+(z+2)*w*h]  , imgr[x+1+(y)*w+(z+2)*w*h]  , imgr[x+2+(y)*w+(z+2)*w*h]};
            type r32[4] = {imgr[x-1+(y+1)*w+(z+2)*w*h], imgr[x+(y+1)*w+(z+2)*w*h], imgr[x+1+(y+1)*w+(z+2)*w*h], imgr[x+2+(y+1)*w+(z+2)*w*h]};
            type r33[4] = {imgr[x-1+(y+2)*w+(z+2)*w*h], imgr[x+(y+2)*w+(z+2)*w*h], imgr[x+1+(y+2)*w+(z+2)*w*h], imgr[x+2+(y+2)*w+(z+2)*w*h]};
            type rx3[4] = {cubic(r30, dx), cubic(r31, dx), cubic(r32, dx), cubic(r33, dx)};
            type ry[4] = {cubic(rx0, dy), cubic(rx1, dy), cubic(rx2, dy), cubic(rx3, dy)};
            imgo[i + j*n0 + k*n0*n1] = cubic(ry, dz);
        }
        else if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l-1)
        {
            // Border fallback: trilinear blend of the 2x2x2 neighborhood.
            // NOTE(review): reads x+1 / y+1 — can go out of range when x == w-1
            // or y == h-1; confirm callers never produce that case.
            type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
            type iw[4] = {imgr[x+y*w+(z+1)*w*h], imgr[x+1+y*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h]};
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dxdy = dx*dy;
            type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
            type rw = iw[0]*(1-dx-dy+dxdy) + iw[1]*(dx-dxdy) + iw[2]*(dy-dxdy) + iw[3]*dxdy;
            type dz = zt - (type)z;
            type r = rv*(1-dz) + rw*dz;
            imgo[i + j*n0 + k*n0*n1] = r;
        }
        else if(x >= 0 && x < w && y >= 0 && y < h && z == l-1) // border case
        {
            // Last z slice: bilinear blend within the slice only.
            type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dxdy = dx*dy;
            type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
            imgo[i + j*n0 + k*n0*n1] = rv;
        };
    };
};
template <typename type>
__global__ void kernel_gradientx( const type * imgr, type * imgo,
                                    int n0, int n1, int n2)
{
    // Finite-difference derivative along x: forward difference at i == 0,
    // backward difference at i == n0-1, central difference elsewhere.
    // The (k == 0 || k < n2) guard from the original keeps the kernel usable
    // when launched for 2d images (k fixed at 0 regardless of n2).
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || (k != 0 && k >= n2)) return;
    int idx = i + j*n0 + k*n0*n1;
    if (i == 0)
        imgo[idx] = imgr[idx + 1] - imgr[idx];
    else if (i == n0 - 1)
        imgo[idx] = imgr[idx] - imgr[idx - 1];
    else
        imgo[idx] = 0.5*imgr[idx + 1] - 0.5*imgr[idx - 1];
};
template <typename type>
__global__ void kernel_gradienty( const type * imgr, type * imgo,
                                    int n0, int n1, int n2)
{
    // Finite-difference derivative along y: forward difference at j == 0,
    // backward difference at j == n1-1, central difference elsewhere.
    // The (k == 0 || k < n2) guard from the original keeps the kernel usable
    // when launched for 2d images (k fixed at 0 regardless of n2).
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || (k != 0 && k >= n2)) return;
    int idx = i + j*n0 + k*n0*n1;
    if (j == 0)
        imgo[idx] = imgr[idx + n0] - imgr[idx];
    else if (j == n1 - 1)
        imgo[idx] = imgr[idx] - imgr[idx - n0];
    else
        imgo[idx] = 0.5*imgr[idx + n0] - 0.5*imgr[idx - n0];
};
template <typename type>
__global__ void kernel_gradientz( const type * imgr, type * imgo,
                                    int n0, int n1, int n2)
{
    // Finite-difference derivative along z: forward difference at k == 0,
    // backward difference at k == n2-1, central difference elsewhere.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i >= n0 || j >= n1 || k >= n2) return;
    int slice = n0*n1;              // voxels per z slice
    int idx = i + j*n0 + k*slice;
    if (k == 0)
        imgo[idx] = imgr[idx + slice] - imgr[idx];
    else if (k == n2 - 1)
        imgo[idx] = imgr[idx] - imgr[idx - slice];
    else
        imgo[idx] = 0.5*imgr[idx + slice] - 0.5*imgr[idx - slice];
};
template <typename type>
__global__ void kernel_convolution_2d( const type * imgr, const type * kern, //kernel width
                                        type * imgo, int n0, int n1, int kw0, int kw1)
{
    // Direct 2d convolution of imgr (n0 x n1) with kern (kw0 x kw1, row major
    // with q/x fastest). Only interior pixels whose full neighborhood fits in
    // the image are written; border pixels are left untouched.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int off0 = kw0>>1; int off1 = kw1>>1;   // kernel half-widths
    if(i >= off0 && i < n0 - off0 && j >= off1 && j < n1 - off1)
    {
        type sum = 0;
        for (int p = 0; p < kw1; p++)       // kernel rows (y)
        {
            for (int q = 0; q < kw0; q++)   // kernel columns (x)
            {
                // BUG FIX: p/q were swapped in the image index (i+p-off0,
                // j+q-off1), transposing the kernel for asymmetric sizes and
                // disagreeing with kernel_convolution_3d; q is the x offset
                // and p the y offset, matching kern[p*kw0 + q].
                sum += imgr[i+q-off0 + (j+p-off1)*n0] * kern[p*kw0 + q];
            };
        };
        imgo[i + j*n0] = sum;
    };
};
template <typename type>
__global__ void kernel_convolution_3d( const type * imgr, const type * kern, //kernel width
                                        type * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2)
{
    // Direct 3d convolution of imgr (n0 x n1 x n2) with kern (kw0 x kw1 x kw2,
    // row major with q/x fastest). Only interior voxels whose full neighborhood
    // fits in the volume are written; border voxels are left untouched.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    int off0 = kw0>>1; int off1 = kw1>>1; int off2 = kw2>>1;   // kernel half-widths
    bool interior = i >= off0 && i < n0 - off0 &&
                    j >= off1 && j < n1 - off1 &&
                    k >= off2 && k < n2 - off2;
    if (!interior) return;
    type sum = 0;
    for (int r = 0; r < kw2; r++)           // kernel depth (z)
        for (int p = 0; p < kw1; p++)       // kernel rows (y)
            for (int q = 0; q < kw0; q++)   // kernel columns (x)
                sum += imgr[(i+q-off0) + (j+p-off1)*n0 + (k+r-off2)*n0*n1]
                     * kern[r*kw0*kw1 + p*kw0 + q];
    imgo[i + j*n0 + k*n0*n1] = sum;
};
// ===========================================
// Kernels Calls
// ===========================================
// ===========================================
// Data Kernels
// ===========================================
template <typename type>
void cuda_kernel_assign( std::vector<int> & grid, std::vector<int> & block,
                        type * vin, type value, int n )
{
    // Fill vin[0..n) with a constant value; 1d launch configuration taken
    // from grid[0]/block[0]. Launch and execution errors abort via the
    // project's imart_assert_kernel helper.
    kernel_assign<<<dim3(grid[0]),dim3(block[0])>>>(vin, value, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel assign" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel assign" );
};
template <typename type>
void cuda_kernel_copy( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int n )
{
    // Copy n elements from vin to vout on the device (1d configuration).
    kernel_copy<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel copy" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel copy" );
};
template <typename typein, typename typeout>
void cuda_kernel_cast( std::vector<int> & grid, std::vector<int> & block,
                        const typein * vin, typeout * vout, int n )
{
    // Element-wise type conversion of n values from typein to typeout.
    kernel_cast <typein,typeout><<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel cast" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel cast" );
};
// ===========================================
// Vector Kernels
// ===========================================
template <typename type>
void cuda_kernel_add_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_add_scalar over n elements (1d configuration).
    kernel_add_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel add scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel add scalar" );
};
template <typename type>
void cuda_kernel_sub_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_sub_scalar over n elements (1d configuration).
    kernel_sub_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sub scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sub scalar" );
};
template <typename type>
void cuda_kernel_sub_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_sub_scalar_inv (inverted operand order variant) over n elements.
    kernel_sub_scalar_inv<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sub scalar inv" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sub scalar inv" );
};
template <typename type>
void cuda_kernel_mul_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_mul_scalar over n elements (1d configuration).
    kernel_mul_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel mul scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel mul scalar" );
};
template <typename type>
void cuda_kernel_div_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_div_scalar over n elements (1d configuration).
    kernel_div_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel div scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel div scalar" );
};
template <typename type>
void cuda_kernel_div_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_div_scalar_inv (inverted operand order variant) over n elements.
    kernel_div_scalar_inv<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel div scalar inv" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel div scalar inv" );
};
template <typename type>
void cuda_kernel_pow_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_pow_scalar over n elements (1d configuration).
    kernel_pow_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pow scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pow scalar" );
};
template <typename type>
void cuda_kernel_pow_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n )
{
    // Launch kernel_pow_scalar_inv (inverted operand order variant) over n elements.
    kernel_pow_scalar_inv<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pow scalar inv" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pow scalar inv" );
};
template <typename type>
void cuda_kernel_add( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n )
{
    // Launch the element-wise addition kernel over n elements.
    kernel_add<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel add" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel add" );
};
template <typename type>
void cuda_kernel_sub( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n )
{
    // Launch the element-wise subtraction kernel over n elements.
    kernel_sub<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sub" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sub" );
};
template <typename type>
void cuda_kernel_mul( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n )
{
    // Launch the element-wise multiplication kernel over n elements.
    kernel_mul<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel mul" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel mul" );
};
template <typename type>
void cuda_kernel_div( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n )
{
    // Launch the element-wise division kernel over n elements.
    kernel_div<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel div" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel div" );
};
template <typename type>
void cuda_kernel_pow( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n )
{
    // Launch the element-wise power kernel over n elements.
    kernel_pow<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pow" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pow" );
};
template <typename type>
void cuda_kernel_equal( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n)
{
    // Launch the element-wise equality comparison kernel over n elements.
    kernel_equal<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel equal" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel equal" );
};
template <typename type>
void cuda_kernel_greater( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n)
{
    // Launch the element-wise greater-than comparison kernel over n elements.
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_greater<<<grd,blk>>>(vin1, vin2, vout, n);
    // Name the kernel in the diagnostics, consistent with the sibling wrappers
    // (the messages previously said only "Fail to run kernel").
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater" );
};
template <typename type>
void cuda_kernel_less( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n)
{
    // Launch the element-wise less-than comparison kernel over n elements.
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_less<<<grd,blk>>>(vin1, vin2, vout, n);
    // Name the kernel in the diagnostics, consistent with the sibling wrappers
    // (the messages previously said only "Fail to run kernel").
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less" );
};
template <typename type>
void cuda_kernel_greater_equal( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n)
{
    // Launch the element-wise greater-or-equal comparison kernel over n elements.
    kernel_greater_equal<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater equal" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater equal" );
};
template <typename type>
void cuda_kernel_less_equal( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n)
{
    // Launch the element-wise less-or-equal comparison kernel over n elements.
    kernel_less_equal<<<dim3(grid[0]),dim3(block[0])>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less equal" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less equal" );
};
template <typename type>
void cuda_kernel_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n)
{
    // Launch the scalar equality comparison kernel over n elements.
    kernel_equal_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel equal scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel equal scalar" );
};
template <typename type>
void cuda_kernel_greater_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n)
{
    // Launch the scalar greater-than comparison kernel over n elements.
    kernel_greater_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater scalar" );
};
template <typename type>
void cuda_kernel_less_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n)
{
    // Launch the scalar less-than comparison kernel over n elements.
    kernel_less_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less scalar" );
};
template <typename type>
void cuda_kernel_greater_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n)
{
    // Launch the scalar greater-or-equal comparison kernel over n elements.
    kernel_greater_equal_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater equal scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater equal scalar" );
};
template <typename type>
void cuda_kernel_less_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, type scalar, int n)
{
    // Launch the scalar less-or-equal comparison kernel over n elements.
    kernel_less_equal_scalar<<<dim3(grid[0]),dim3(block[0])>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less equal scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less equal scalar" );
};
template <typename type>
void cuda_kernel_replace( std::vector<int> & grid, std::vector<int> & block,
                        const type * idxs, const type * vin, type * vout, int n)
{
    // Launch kernel_replace: replaces vout entries selected by idxs with
    // values from vin (selection semantics defined by the kernel).
    kernel_replace<<<dim3(grid[0]),dim3(block[0])>>>(idxs, vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel replace" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel replace" );
};
template <typename type>
void cuda_kernel_replace_scalar( std::vector<int> & grid, std::vector<int> & block,
                        const type * idxs, type * vout, type value, int n)
{
    // Launch kernel_replace_scalar: writes a constant into vout entries
    // selected by idxs (selection semantics defined by the kernel).
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_replace_scalar<<<grd,blk>>>(idxs, vout, value, n);
    // Fix copy-pasted diagnostics: name this wrapper's kernel ("replace scalar"),
    // not plain "replace", consistent with the sibling wrappers.
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel replace scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel replace scalar" );
};
// ===========================================
// Reduction Kernels
// ===========================================
template <typename type>
void cuda_kernel_sum( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int n)
{
    // Host-side wrapper: launch the kernel_sum reduction with a 1-D
    // configuration (only element 0 of grid/block is used) and synchronize.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_sum<<<launch_grid,launch_block>>>(vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sum" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sum" );
};
template <typename type>
void cuda_kernel_min( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int n)
{
    // Host-side wrapper: launch the kernel_min reduction with a 1-D
    // configuration (only element 0 of grid/block is used) and synchronize.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_min<<<launch_grid,launch_block>>>(vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel min" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel min" );
};
template <typename type>
void cuda_kernel_max( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int n)
{
    // Host-side wrapper: launch the kernel_max reduction with a 1-D
    // configuration (only element 0 of grid/block is used) and synchronize.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_max<<<launch_grid,launch_block>>>(vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel max" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel max" );
};
// ===========================================
// Image Kernels
// ===========================================
template <typename type>
void cuda_kernel_pad_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int start0, int start1,
                        int end0, int end1, int n0, int n1 )
{
    // Host-side wrapper: launch kernel_pad_2d with a 2-D configuration
    // (elements 0 and 1 of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1]);
    const dim3 launch_block(block[0],block[1]);
    kernel_pad_2d<<<launch_grid,launch_block>>>(vin, vout, start0, start1, end0, end1, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pad 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pad 2d" );
};
template <typename type>
void cuda_kernel_unpad_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int start0, int start1,
                        int end0, int end1, int n0, int n1 )
{
    // Host-side wrapper: launch kernel_unpad_2d with a 2-D configuration
    // (elements 0 and 1 of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1]);
    const dim3 launch_block(block[0],block[1]);
    kernel_unpad_2d<<<launch_grid,launch_block>>>(vin, vout, start0, start1, end0, end1, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel unpad 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel unpad 2d" );
};
template <typename type>
void cuda_kernel_pad_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int start0, int start1, int start2,
                        int end0, int end1, int end2, int n0, int n1, int n2 )
{
    // Host-side wrapper: launch kernel_pad_3d with a 3-D configuration
    // (all three elements of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_pad_3d<<<launch_grid,launch_block>>>(vin, vout, start0, start1, start2, end0, end1, end2, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pad 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pad 3d" );
};
template <typename type>
void cuda_kernel_unpad_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin, type * vout, int start0, int start1, int start2,
                        int end0, int end1, int end2, int n0, int n1, int n2 )
{
    // Host-side wrapper: launch kernel_unpad_3d with a 3-D configuration
    // (all three elements of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_unpad_3d<<<launch_grid,launch_block>>>(vin, vout, start0, start1, start2, end0, end1, end2, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel unpad 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel unpad 3d" );
};
template <typename type>
void cuda_kernel_grid_2d( std::vector<int> & grid, std::vector<int> & block,
                        type * x, type * y, double * sod,
                        int n0, int n1)
{
    // Host-side wrapper: launch kernel_grid_2d with a 2-D configuration
    // (elements 0 and 1 of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1]);
    const dim3 launch_block(block[0],block[1]);
    kernel_grid_2d<<<launch_grid,launch_block>>>(x, y, sod, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel grid 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel grid 2d" );
};
template <typename type>
void cuda_kernel_grid_3d( std::vector<int> & grid, std::vector<int> & block,
                        type * x, type * y, type * z, double * sod,
                        int n0, int n1, int n2)
{
    // Host-side wrapper: launch kernel_grid_3d with a 3-D configuration
    // (all three elements of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_grid_3d<<<launch_grid,launch_block>>>(x, y, z, sod, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel grid 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel grid 3d" );
};
template <typename type>
void cuda_kernel_affine_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xin, const type * yin,
                        type * xout, type * yout,
                        const type * param, int n)
{
    // Host-side wrapper: launch kernel_affine_2d with a 1-D configuration
    // (only element 0 of grid/block is used) and wait for completion.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_affine_2d<<<launch_grid,launch_block>>>(xin, yin, xout, yout, param, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine 2d" );
};
template <typename type>
void cuda_kernel_affine_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xin, const type * yin, const type * zin,
                        type * xout, type * yout, type * zout,
                        const type * param, int n)
{
    // Host-side wrapper: launch kernel_affine_3d with a 1-D configuration
    // (only element 0 of grid/block is used) and wait for completion.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_affine_3d<<<launch_grid,launch_block>>>(xin, yin, zin, xout, yout, zout, param, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine 3d" );
};
template <typename type>
void cuda_kernel_affine_sod_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xin, const type * yin,
                        type * xout, type * yout,
                        const double * sod, int n)
{
    // Host-side wrapper: launch kernel_affine_sod_2d with a 1-D configuration
    // (only element 0 of grid/block is used) and wait for completion.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_affine_sod_2d<<<launch_grid,launch_block>>>(xin, yin, xout, yout, sod, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine sod 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine sod 2d" );
};
template <typename type>
void cuda_kernel_affine_sod_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xin, const type * yin, const type * zin,
                        type * xout, type * yout, type * zout,
                        const double * sod, int n)
{
    // Host-side wrapper: launch kernel_affine_sod_3d with a 1-D configuration
    // (only element 0 of grid/block is used) and wait for completion.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_affine_sod_3d<<<launch_grid,launch_block>>>(xin, yin, zin, xout, yout, zout, sod, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine sod 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine sod 3d" );
};
template <typename type>
void cuda_kernel_dfield_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xin, const type * yin, // grid coordinates
                        const type * x, const type * y, // vector field
                        type * xout, type * yout, int n)
{
    // Host-side wrapper: launch kernel_dfield_2d with a 1-D configuration
    // (only element 0 of grid/block is used) and wait for completion.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_dfield_2d<<<launch_grid,launch_block>>>(xin, yin, x, y, xout, yout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel dfield 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel dfield 2d" );
};
template <typename type>
void cuda_kernel_dfield_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xin, const type * yin, const type * zin, // grid coordinates
                        const type * x, const type * y, const type * z, // vector field
                        type * xout, type * yout, type * zout, // output coordinates
                        int n)
{
    // Host-side wrapper: launch kernel_dfield_3d with a 1-D configuration
    // (only element 0 of grid/block is used) and wait for completion.
    const dim3 launch_grid(grid[0]);
    const dim3 launch_block(block[0]);
    kernel_dfield_3d<<<launch_grid,launch_block>>>(xin, yin, zin, x, y, z, xout, yout, zout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel dfield 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel dfield 3d" );
};
template <typename type>
void cuda_kernel_nearest_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xo, const type * yo,
                        const type * imgr, type * imgo,
                        int w, int h, //img ref width and height
                        int n0, int n1) //img out dims
{
    // Host-side wrapper: launch kernel_nearest_interpolation_2d with a 2-D
    // configuration (elements 0 and 1 of grid/block) and synchronize.
    const dim3 launch_grid(grid[0],grid[1]);
    const dim3 launch_block(block[0],block[1]);
    kernel_nearest_interpolation_2d<<<launch_grid,launch_block>>>(xo, yo, imgr, imgo, w, h, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel nearest interpolation 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel nearest interpolation 2d" );
};
template <typename type>
void cuda_kernel_nearest_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xo, const type * yo, const type * zo,
                        const type * imgr, type * imgo,
                        int w, int h, int l, //img ref width, height and length
                        int n0, int n1, int n2)
{
    // Host-side wrapper: launch kernel_nearest_interpolation_3d with a 3-D
    // configuration (all three elements of grid/block) and synchronize.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_nearest_interpolation_3d<<<launch_grid,launch_block>>>(xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel nearest interpolation 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel nearest interpolation 3d" );
};
template <typename type>
void cuda_kernel_linear_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xo, const type * yo,
                        const type * imgr, type * imgo,
                        int w, int h, //img ref width and height
                        int n0, int n1) //img out dims
{
    // Host-side wrapper: launch kernel_linear_interpolation_2d with a 2-D
    // configuration (elements 0 and 1 of grid/block) and synchronize.
    const dim3 launch_grid(grid[0],grid[1]);
    const dim3 launch_block(block[0],block[1]);
    kernel_linear_interpolation_2d<<<launch_grid,launch_block>>>(xo, yo, imgr, imgo, w, h, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel linear interpolation 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel linear interpolation 2d" );
};
template <typename type>
void cuda_kernel_linear_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xo, const type * yo, const type * zo,
                        const type * imgr, type * imgo,
                        int w, int h, int l, //img ref width, height and length
                        int n0, int n1, int n2)
{
    // Host-side wrapper: launch kernel_linear_interpolation_3d with a 3-D
    // configuration (all three elements of grid/block) and synchronize.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_linear_interpolation_3d<<<launch_grid,launch_block>>>(xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel linear interpolation 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel linear interpolation 3d" );
};
template <typename type>
void cuda_kernel_cubic_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xo, const type * yo,
                        const type * imgr, type * imgo,
                        int w, int h, //img ref width and height
                        int n0, int n1) //img out dims
{
    // Host-side wrapper: launch kernel_cubic_interpolation_2d with a 2-D
    // configuration (elements 0 and 1 of grid/block) and synchronize.
    const dim3 launch_grid(grid[0],grid[1]);
    const dim3 launch_block(block[0],block[1]);
    kernel_cubic_interpolation_2d<<<launch_grid,launch_block>>>(xo, yo, imgr, imgo, w, h, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel cubic interpolation 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel cubic interpolation 2d" );
};
template <typename type>
void cuda_kernel_cubic_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * xo, const type * yo, const type * zo,
                        const type * imgr, type * imgo,
                        int w, int h, int l, //img ref width, height and length
                        int n0, int n1, int n2)
{
    // Host-side wrapper: launch kernel_cubic_interpolation_3d with a 3-D
    // configuration (all three elements of grid/block) and synchronize.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_cubic_interpolation_3d<<<launch_grid,launch_block>>>(xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel cubic interpolation 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel cubic interpolation 3d" );
};
template <typename type>
void cuda_kernel_gradientx( std::vector<int> & grid, std::vector<int> & block,
                        const type * imgr, type * imgo,
                        int n0, int n1, int n2)
{
    // Host-side wrapper: launch kernel_gradientx and synchronize. A zero
    // block[2] selects a 2-D launch; otherwise all three dimensions are used.
    // (dim3's omitted z component defaults to 1, so the two-argument form
    // matches the original 2-D branch exactly.)
    const bool two_dim = (block[2] == 0);
    const dim3 launch_grid  = two_dim ? dim3(grid[0],grid[1])   : dim3(grid[0],grid[1],grid[2]);
    const dim3 launch_block = two_dim ? dim3(block[0],block[1]) : dim3(block[0],block[1],block[2]);
    kernel_gradientx<<<launch_grid,launch_block>>>(imgr, imgo, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel gradient x" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel gradient x" );
};
template <typename type>
void cuda_kernel_gradienty( std::vector<int> & grid, std::vector<int> & block,
                        const type * imgr, type * imgo,
                        int n0, int n1, int n2)
{
    // Host-side wrapper: launch kernel_gradienty and synchronize. A zero
    // block[2] selects a 2-D launch; otherwise all three dimensions are used.
    // (dim3's omitted z component defaults to 1, so the two-argument form
    // matches the original 2-D branch exactly.)
    const bool two_dim = (block[2] == 0);
    const dim3 launch_grid  = two_dim ? dim3(grid[0],grid[1])   : dim3(grid[0],grid[1],grid[2]);
    const dim3 launch_block = two_dim ? dim3(block[0],block[1]) : dim3(block[0],block[1],block[2]);
    kernel_gradienty<<<launch_grid,launch_block>>>(imgr, imgo, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel gradient y" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel gradient y" );
};
template <typename type>
void cuda_kernel_gradientz( std::vector<int> & grid, std::vector<int> & block,
                        const type * imgr, type * imgo,
                        int n0, int n1, int n2)
{
    // Host-side wrapper: launch kernel_gradientz with a 3-D configuration
    // (all three elements of grid/block; z only exists in 3-D) and synchronize.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_gradientz<<<launch_grid,launch_block>>>(imgr, imgo, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel gradient z" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel gradient z" );
};
template <typename type>
void cuda_kernel_convolution_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * imgr, const type * kern, //kernel width
                        type * imgo, int n0, int n1, int kw0, int kw1)
{
    // Host-side wrapper: launch kernel_convolution_2d with a 2-D configuration
    // (elements 0 and 1 of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1]);
    const dim3 launch_block(block[0],block[1]);
    kernel_convolution_2d<<<launch_grid,launch_block>>>(imgr, kern, imgo, n0, n1, kw0, kw1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel convolution 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel convolution 2d" );
};
template <typename type>
void cuda_kernel_convolution_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * imgr, const type * kern, //kernel width
                        type * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2)
{
    // Host-side wrapper: launch kernel_convolution_3d with a 3-D configuration
    // (all three elements of grid/block) and wait for completion.
    const dim3 launch_grid(grid[0],grid[1],grid[2]);
    const dim3 launch_block(block[0],block[1],block[2]);
    kernel_convolution_3d<<<launch_grid,launch_block>>>(imgr, kern, imgo, n0, n1, n2, kw0, kw1, kw2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel convolution 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel convolution 3d" );
};
// Generic 2-D FFT wrapper: intentionally a no-op. Only the float and double
// specializations below implement the transform (via cuFFT C2C / Z2Z).
template <typename type>
void cuda_kernel_fft_2d( std::vector<int> & grid, std::vector<int> & block,
                        const type * in_real, const type * in_img,
                        type * out_real, type * out_img, int n0, int n1, bool forward )
{
    ;
};
// specialization
template <> void cuda_kernel_fft_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * in_real, const float * in_img,
                        float * out_real, float * out_img, int n0, int n1, bool forward )
{
    // Single-precision 2-D FFT via cuFFT. The split real/imaginary device
    // arrays are interleaved into a cufftComplex buffer with strided
    // device-to-device copies, transformed (forward or inverse, unscaled),
    // then de-interleaved into out_real/out_img.
    // grid/block are unused: cuFFT picks its own launch configuration.
    int N = n0*n1;
    cufftComplex *in;
    cufftComplex *out;
    // Fixed: allocation results were previously ignored; a failed cudaMalloc
    // would only surface later as a confusing copy error.
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftComplex)), "Fail to allocate fft 2d input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftComplex)), "Fail to allocate fft 2d output buffer" );
    float * tmpi = (float *) in;
    // Even floats of `in` receive the real parts, odd floats the imaginary.
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
                in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
                in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    // NOTE(review): cufft* calls return cufftResult, not cudaError_t, so they
    // cannot go through imart_assert_kernel; their status remains unchecked.
    cufftPlan2d(&p_fft, n1, n0, CUFFT_C2C);
    if (forward)
    {
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        // cuFFT's inverse transform is unscaled (no 1/N normalization).
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_INVERSE);
    };
    float * tmpo = (float *) out;
    // De-interleave the complex result back into the split output arrays.
    imart_assert_kernel ( cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]),
                tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]),
                tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};
// specialization
template <> void cuda_kernel_fft_2d<double>( std::vector<int> & grid, std::vector<int> & block,
                        const double * in_real, const double * in_img,
                        double * out_real, double * out_img, int n0, int n1, bool forward )
{
    // Double-precision 2-D FFT via cuFFT (Z2Z). Split real/imaginary inputs
    // are interleaved into a cufftDoubleComplex buffer, transformed (forward
    // or inverse, unscaled), then de-interleaved into out_real/out_img.
    // grid/block are unused: cuFFT picks its own launch configuration.
    int N = n0*n1;
    cufftDoubleComplex *in;
    cufftDoubleComplex *out;
    // Fixed: allocation results were previously ignored; a failed cudaMalloc
    // would only surface later as a confusing copy error.
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft 2d input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft 2d output buffer" );
    double * tmpi = (double *) in;
    // Even doubles of `in` receive the real parts, odd doubles the imaginary.
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
                in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
                in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    // NOTE(review): cufft* calls return cufftResult, not cudaError_t, so they
    // cannot go through imart_assert_kernel; their status remains unchecked.
    cufftPlan2d(&p_fft, n1, n0, CUFFT_C2C);
    if (forward)
    {
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        // cuFFT's inverse transform is unscaled (no 1/N normalization).
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_INVERSE);
    };
    double * tmpo = (double *) out;
    // De-interleave the complex result back into the split output arrays.
    imart_assert_kernel ( cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]),
                tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]),
                tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};
// Generic 3-D FFT wrapper: intentionally a no-op. Only the float and double
// specializations below implement the transform (via cuFFT C2C / Z2Z).
template <typename type>
void cuda_kernel_fft_3d( std::vector<int> & grid, std::vector<int> & block,
                        const type * in_real, const type * in_img,
                        type * out_real, type * out_img, int n0, int n1, int n2, bool forward )
{
    ;
};
// specialization
template <> void cuda_kernel_fft_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * in_real, const float * in_img,
                        float * out_real, float * out_img, int n0, int n1, int n2, bool forward )
{
    // Single-precision 3-D FFT via cuFFT. Split real/imaginary inputs are
    // interleaved into a cufftComplex buffer, transformed (forward or
    // inverse, unscaled), then de-interleaved into out_real/out_img.
    // grid/block are unused: cuFFT picks its own launch configuration.
    int N = n0*n1*n2;
    cufftComplex *in;
    cufftComplex *out;
    // Fixed: allocation results were previously ignored; a failed cudaMalloc
    // would only surface later as a confusing copy error.
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftComplex)), "Fail to allocate fft 3d input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftComplex)), "Fail to allocate fft 3d output buffer" );
    float * tmpi = (float *) in;
    // Even floats of `in` receive the real parts, odd floats the imaginary.
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
                in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
                in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    // NOTE(review): cufft* calls return cufftResult, not cudaError_t, so they
    // cannot go through imart_assert_kernel; their status remains unchecked.
    cufftPlan3d(&p_fft, n2, n1, n0, CUFFT_C2C);
    if (forward)
    {
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        // cuFFT's inverse transform is unscaled (no 1/N normalization).
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_INVERSE);
    };
    float * tmpo = (float *) out;
    // De-interleave the complex result back into the split output arrays.
    imart_assert_kernel ( cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]),
                tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]),
                tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};
// specialization
template <> void cuda_kernel_fft_3d<double>( std::vector<int> & grid, std::vector<int> & block,
                        const double * in_real, const double * in_img,
                        double * out_real, double * out_img, int n0, int n1, int n2, bool forward )
{
    // Double-precision 3-D FFT via cuFFT (Z2Z). Split real/imaginary inputs
    // are interleaved into a cufftDoubleComplex buffer, transformed (forward
    // or inverse, unscaled), then de-interleaved into out_real/out_img.
    // grid/block are unused: cuFFT picks its own launch configuration.
    int N = n0*n1*n2;
    cufftDoubleComplex *in;
    cufftDoubleComplex *out;
    // Fixed: allocation results were previously ignored; a failed cudaMalloc
    // would only surface later as a confusing copy error.
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft 3d input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft 3d output buffer" );
    double * tmpi = (double *) in;
    // Even doubles of `in` receive the real parts, odd doubles the imaginary.
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]),
                in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]),
                in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    // NOTE(review): cufft* calls return cufftResult, not cudaError_t, so they
    // cannot go through imart_assert_kernel; their status remains unchecked.
    cufftPlan3d(&p_fft, n2, n1, n0, CUFFT_C2C);
    if (forward)
    {
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        // cuFFT's inverse transform is unscaled (no 1/N normalization).
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_INVERSE);
    };
    double * tmpo = (double *) out;
    // De-interleave the complex result back into the split output arrays.
    imart_assert_kernel ( cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]),
                tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]),
                tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]),
                N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};
// template <typename type>
// void cuda_kernel_( std::vector<int> & grid, std::vector<int> & block,
// )
// {
// dim3 grd(grid[0],grid[1],grid[2]);
// dim3 blk(block[0],block[1],block[2]);
// kernel_<<<grd,blk>>>();
// imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel" );
// imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel" );
// };
// ===========================================
// Explicit instanciation
// ===========================================
// CASTINGS
// Explicit instantiations of cuda_kernel_cast for every supported
// <source, destination> element-type pair (float/double/int/unsigned
// short/unsigned int/unsigned char, plus the identity casts).
template void cuda_kernel_cast <float,double>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, double * vout, int n );
template void cuda_kernel_cast <double,float>( std::vector<int> & grid, std::vector<int> & block,
                        const double * vin, float * vout, int n );
template void cuda_kernel_cast <int,float>( std::vector<int> & grid, std::vector<int> & block,
                        const int * vin, float * vout, int n );
template void cuda_kernel_cast <float,int>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, int * vout, int n );
template void cuda_kernel_cast <int,double>( std::vector<int> & grid, std::vector<int> & block,
                        const int * vin, double * vout, int n );
template void cuda_kernel_cast <double,int>( std::vector<int> & grid, std::vector<int> & block,
                        const double * vin, int * vout, int n );
template void cuda_kernel_cast <float,unsigned short>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, unsigned short * vout, int n );
template void cuda_kernel_cast <unsigned short,float>( std::vector<int> & grid, std::vector<int> & block,
                        const unsigned short * vin, float * vout, int n );
template void cuda_kernel_cast <double,unsigned short>( std::vector<int> & grid, std::vector<int> & block,
                        const double * vin, unsigned short * vout, int n );
template void cuda_kernel_cast <unsigned short,double>( std::vector<int> & grid, std::vector<int> & block,
                        const unsigned short * vin, double * vout, int n );
template void cuda_kernel_cast <float,unsigned int>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, unsigned int * vout, int n );
template void cuda_kernel_cast <unsigned int,float>( std::vector<int> & grid, std::vector<int> & block,
                        const unsigned int * vin, float * vout, int n );
template void cuda_kernel_cast <double,unsigned int>( std::vector<int> & grid, std::vector<int> & block,
                        const double * vin, unsigned int * vout, int n );
template void cuda_kernel_cast <unsigned int,double>( std::vector<int> & grid, std::vector<int> & block,
                        const unsigned int * vin, double * vout, int n );
template void cuda_kernel_cast <float,unsigned char>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, unsigned char * vout, int n );
template void cuda_kernel_cast <unsigned char,float>( std::vector<int> & grid, std::vector<int> & block,
                        const unsigned char * vin, float * vout, int n );
template void cuda_kernel_cast <double,unsigned char>( std::vector<int> & grid, std::vector<int> & block,
                        const double * vin, unsigned char * vout, int n );
template void cuda_kernel_cast <unsigned char,double>( std::vector<int> & grid, std::vector<int> & block,
                        const unsigned char * vin, double * vout, int n );
template void cuda_kernel_cast <float,float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, int n );
template void cuda_kernel_cast <double,double>( std::vector<int> & grid, std::vector<int> & block,
                        const double * vin, double * vout, int n );
// Explicit float instantiations of the element-wise arithmetic, comparison,
// replace, and reduction wrappers defined above.
template void cuda_kernel_assign<float>( std::vector<int> & grid, std::vector<int> & block,
                        float * vin, float value, int n );
template void cuda_kernel_copy<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, int n );
template void cuda_kernel_add<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_sub<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_mul<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_div<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_pow<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_add_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_sub_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_sub_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_mul_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_div_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_div_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_pow_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_pow_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_equal<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_greater<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_less<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_greater_equal<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_less_equal<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_greater_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_less_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_greater_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_less_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_replace<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * idxs, const float * vin, float * vout, int n);
template void cuda_kernel_replace_scalar<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * idxs, float * vout, float value, int n);
// Reductions.
template void cuda_kernel_sum<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_min<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_max<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
// Explicit float instantiations of the image wrappers defined above:
// pad/unpad, coordinate grids, affine/deformation-field transforms, and
// nearest/linear/cubic interpolation.
template void cuda_kernel_pad_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, int start0, int start1,
                        int end0, int end1, int n0, int n1 );
template void cuda_kernel_unpad_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, int start0, int start1,
                        int end0, int end1, int n0, int n1);
template void cuda_kernel_pad_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, int start0, int start1, int start2,
                        int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_unpad_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * vin, float * vout, int start0, int start1, int start2,
                        int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_grid_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        float * x, float * y, double * sod,
                        int n0, int n1);
template void cuda_kernel_grid_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        float * x, float * y, float * z, double * sod,
                        int n0, int n1, int n2);
template void cuda_kernel_affine_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xin, const float * yin,
                        float * xout, float * yout,
                        const float * param, int n);
template void cuda_kernel_affine_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xin, const float * yin, const float * zin,
                        float * xout, float * yout, float * zout,
                        const float * param, int n) ;
template void cuda_kernel_affine_sod_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xin, const float * yin,
                        float * xout, float * yout,
                        const double * sod, int n);
template void cuda_kernel_affine_sod_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xin, const float * yin, const float * zin,
                        float * xout, float * yout, float * zout,
                        const double * sod, int n);
template void cuda_kernel_dfield_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xin, const float * yin, // grid coordinates
                        const float * x, const float * y, // vector field
                        float * xout, float * yout, int n);
template void cuda_kernel_dfield_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xin, const float * yin, const float * zin, // grid coordinates
                        const float * x, const float * y, const float * z, // vector field
                        float * xout, float * yout, float * zout, int n);
template void cuda_kernel_nearest_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xo, const float * yo,
                        const float * imgr, float * imgo,
                        int w, int h, //img ref width and height
                        int n0, int n1); //img out dims
template void cuda_kernel_nearest_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xo, const float * yo, const float * zo,
                        const float * imgr, float * imgo,
                        int w, int h, int l, //img ref width, height and length
                        int n0, int n1, int n2 );
template void cuda_kernel_linear_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xo, const float * yo,
                        const float * imgr, float * imgo,
                        int w, int h, //img ref width and height
                        int n0, int n1); //img out dims
template void cuda_kernel_linear_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xo, const float * yo, const float * zo,
                        const float * imgr, float * imgo,
                        int w, int h, int l, //img ref width, height and length
                        int n0, int n1, int n2);
template void cuda_kernel_cubic_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block,
                        const float * xo, const float * yo,
                        const float * imgr, float * imgo,
                        int w, int h, //img ref width and height
                        int n0, int n1); //img out dims
// template void cuda_kernel_cubic_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block,
// const float * xo, const float * yo, const float * zo,
// const float * imgr, float * imgo,
// int w, int h, int l, //img ref width, height and length
// int n0, int n1, int n2);
template void cuda_kernel_gradientx<float>( std::vector<int> & grid, std::vector<int> & block,
const float * imgr, float * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradienty<float>( std::vector<int> & grid, std::vector<int> & block,
const float * imgr, float * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradientz<float>( std::vector<int> & grid, std::vector<int> & block,
const float * imgr, float * imgo,
int n0, int n1, int n2);
template void cuda_kernel_convolution_2d<float>( std::vector<int> & grid, std::vector<int> & block,
const float * imgr, const float * kern, //kernel width
float * imgo, int n0, int n1, int kw0, int kw1);
template void cuda_kernel_convolution_3d<float>( std::vector<int> & grid, std::vector<int> & block,
const float * imgr, const float * kern, //kernel width
float * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2);
template void cuda_kernel_assign<double>( std::vector<int> & grid, std::vector<int> & block,
double * vin, double value, int n );
template void cuda_kernel_copy<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int n );
template void cuda_kernel_add<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_sub<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_mul<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_div<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_pow<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_add_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_sub_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_sub_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_mul_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_div_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_div_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_pow_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_pow_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_equal<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_greater<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_less<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_greater_equal<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_less_equal<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_greater_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_less_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_greater_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_less_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, double scalar, int n);
template void cuda_kernel_replace<double>( std::vector<int> & grid, std::vector<int> & block,
const double * idxs, const double * vin, double * vout, int n);
template void cuda_kernel_replace_scalar<double>( std::vector<int> & grid, std::vector<int> & block,
const double * idxs, double * vout, double value, int n);
template void cuda_kernel_sum<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
template void cuda_kernel_min<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
template void cuda_kernel_max<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
template void cuda_kernel_pad_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1,
int end0, int end1, int n0, int n1 );
template void cuda_kernel_unpad_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1,
int end0, int end1, int n0, int n1);
template void cuda_kernel_pad_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_unpad_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * vin, double * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_grid_2d<double>( std::vector<int> & grid, std::vector<int> & block,
double * x, double * y, double * sod,
int n0, int n1);
template void cuda_kernel_grid_3d<double>( std::vector<int> & grid, std::vector<int> & block,
double * x, double * y, double * z, double * sod,
int n0, int n1, int n2);
template void cuda_kernel_affine_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin,
double * xout, double * yout,
const double * param, int n);
template void cuda_kernel_affine_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, const double * zin,
double * xout, double * yout, double * zout,
const double * param, int n) ;
template void cuda_kernel_affine_sod_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin,
double * xout, double * yout,
const double * sod, int n);
template void cuda_kernel_affine_sod_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, const double * zin,
double * xout, double * yout, double * zout,
const double * sod, int n);
template void cuda_kernel_dfield_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, // grid coordinates
const double * x, const double * y, // vector field
double * xout, double * yout, int n);
template void cuda_kernel_dfield_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xin, const double * yin, const double * zin, // grid coordinates
const double * x, const double * y, const double * z, // vector field
double * xout, double * yout, double * zout, int n);
template void cuda_kernel_nearest_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo,
const double * imgr, double * imgo,
int w, int h, //img ref width and height
int n0, int n1); //img out dims
template void cuda_kernel_nearest_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo, const double * zo,
const double * imgr, double * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2);
template void cuda_kernel_linear_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo,
const double * imgr, double * imgo,
int w, int h, //img ref width and height
int n0, int n1); //img out dims
template void cuda_kernel_linear_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo, const double * zo,
const double * imgr, double * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2);
template void cuda_kernel_cubic_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo,
const double * imgr, double * imgo,
int w, int h, //img ref width and height
int n0, int n1); //img out dims
template void cuda_kernel_cubic_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * xo, const double * yo, const double * zo,
const double * imgr, double * imgo,
int w, int h, int l, //img ref width, height and length
int n0, int n1, int n2);
template void cuda_kernel_gradientx<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, double * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradienty<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, double * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradientz<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, double * imgo,
int n0, int n1, int n2);
template void cuda_kernel_convolution_2d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, const double * kern, //kernel width
double * imgo, int n0, int n1, int kw0, int kw1);
template void cuda_kernel_convolution_3d<double>( std::vector<int> & grid, std::vector<int> & block,
const double * imgr, const double * kern, //kernel width
double * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2);
template void cuda_kernel_assign<int>( std::vector<int> & grid, std::vector<int> & block,
int * vin, int value, int n );
template void cuda_kernel_copy<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int n );
template void cuda_kernel_add<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_sub<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_mul<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_div<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_pow<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_add_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_sub_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_sub_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_mul_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_div_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_div_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_pow_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_pow_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n );
template void cuda_kernel_equal<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_greater<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_less<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_greater_equal<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_less_equal<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin1, const int * vin2, int * vout, int n );
template void cuda_kernel_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_greater_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_less_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_greater_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_less_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int scalar, int n);
template void cuda_kernel_replace<int>( std::vector<int> & grid, std::vector<int> & block,
const int * idxs, const int * vin, int * vout, int n);
template void cuda_kernel_replace_scalar<int>( std::vector<int> & grid, std::vector<int> & block,
const int * idxs, int * vout, int value, int n);
template void cuda_kernel_sum<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n );
template void cuda_kernel_min<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n );
template void cuda_kernel_max<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n );
template void cuda_kernel_pad_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1,
int end0, int end1, int n0, int n1 );
template void cuda_kernel_unpad_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1,
int end0, int end1, int n0, int n1 );
template void cuda_kernel_pad_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2 );
template void cuda_kernel_unpad_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * vin, int * vout, int start0, int start1, int start2,
int end0, int end1, int end2, int n0, int n1, int n2 );
template void cuda_kernel_grid_2d<int>( std::vector<int> & grid, std::vector<int> & block,
int * x, int * y, double * sod,
int n0, int n1 );
template void cuda_kernel_grid_3d<int>( std::vector<int> & grid, std::vector<int> & block,
int * x, int * y, int * z, double * sod,
int n0, int n1, int n2 );
template void cuda_kernel_affine_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin,
int * xout, int * yout,
const int * param, int n );
template void cuda_kernel_affine_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, const int * zin,
int * xout, int * yout, int * zout,
const int * param, int n );
template void cuda_kernel_affine_sod_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin,
int * xout, int * yout,
const double * sod, int n);
template void cuda_kernel_affine_sod_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, const int * zin,
int * xout, int * yout, int * zout,
const double * sod, int n );
template void cuda_kernel_dfield_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, // grid coordinates
const int * x, const int * y, // vector field
int * xout, int * yout, int n );
template void cuda_kernel_dfield_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * xin, const int * yin, const int * zin, // grid coordinates
const int * x, const int * y, const int * z, // vector field
int * xout, int * yout, int * zout, int n );
// template void cuda_kernel_nearest_interpolation_2d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo,
// const int * imgr, int * imgo,
// int w, int h, //img ref width and height
// int n0, int n1); //img out dims
// template void cuda_kernel_nearest_interpolation_3d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo, const int * zo,
// const int * imgr, int * imgo,
// int w, int h, int l, //img ref width, height and length
// int n0, int n1, int n2);
// template void cuda_kernel_linear_interpolation_2d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo,
// const int * imgr, int * imgo,
// int w, int h, //img ref width and height
// int n0, int n1); //img out dims
// template void cuda_kernel_linear_interpolation_3d<int>( std::vector<int> & grid, std::vector<int> & block,
// const int * xo, const int * yo, const int * zo,
// const int * imgr, int * imgo,
// int w, int h, int l, //img ref width, height and length
// int n0, int n1, int n2);
template void cuda_kernel_gradientx<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, int * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradienty<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, int * imgo,
int n0, int n1, int n2);
template void cuda_kernel_gradientz<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, int * imgo,
int n0, int n1, int n2);
template void cuda_kernel_convolution_2d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, const int * kern, //kernel width
int * imgo, int n0, int n1, int kw0, int kw1);
template void cuda_kernel_convolution_3d<int>( std::vector<int> & grid, std::vector<int> & block,
const int * imgr, const int * kern, //kernel width
int * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2);
template void cuda_kernel_assign<unsigned short>( std::vector<int> & grid, std::vector<int> & block,
unsigned short * vin, unsigned short value, int n );
template void cuda_kernel_copy<unsigned short>( std::vector<int> & grid, std::vector<int> & block,
const unsigned short * vin, unsigned short * vout, int n );
template void cuda_kernel_assign<unsigned int>( std::vector<int> & grid, std::vector<int> & block,
unsigned int * vin, unsigned int value, int n );
template void cuda_kernel_copy<unsigned int>( std::vector<int> & grid, std::vector<int> & block,
const unsigned int * vin, unsigned int * vout, int n );
template void cuda_kernel_assign<unsigned char>( std::vector<int> & grid, std::vector<int> & block,
unsigned char * vin, unsigned char value, int n );
template void cuda_kernel_copy<unsigned char>( std::vector<int> & grid, std::vector<int> & block,
const unsigned char * vin, unsigned char * vout, int n );
template void cuda_kernel_assign<short>( std::vector<int> & grid, std::vector<int> & block,
short * vin, short value, int n );
template void cuda_kernel_copy<short>( std::vector<int> & grid, std::vector<int> & block,
const short * vin, short * vout, int n );
template void cuda_kernel_assign<char>( std::vector<int> & grid, std::vector<int> & block,
char * vin, char value, int n );
template void cuda_kernel_copy<char>( std::vector<int> & grid, std::vector<int> & block,
const char * vin, char * vout, int n );
|
ce10a0b685a38e0ef8e0bfc9d6622226b15061a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "einsum_kernel.cu"
// Python C-API entry point: computes either the network prediction
// (deriv_layer_ind == 0) or the einsum-style derivative w.r.t. one filter layer
// (1=F1, 2=F2, 3=F3, 4=FL) from a precomputed sigma31 buffer, launching
// kernel_deriv on the selected GPU and storing the result in
// sum_res_c[gpu][output_ind] (retrieved later via sigma_return).
// Args (from Python): (deriv_layer_ind, l, output_ind, gpu_ind) as four ints.
// Returns Py_None on success, NULL (without setting a Python exception —
// NOTE(review): callers will see a SystemError; consider PyErr_SetString) on
// validation failure.
static PyObject *einsum_deriv_gpu(PyObject *self, PyObject *args){
hipError_t err; // NOTE(review): appears unused here — presumably consumed by the CHECK_CUDA_ERR macro; confirm before removing.
int deriv_layer_ind; // which dimensions to sum over
int l; // sigma31 buffer ind
int output_ind; // buffer to store output
int gpu_ind;
if (!PyArg_ParseTuple(args, "iiii", &deriv_layer_ind, &l, &output_ind, &gpu_ind))
return NULL;
int g = gpu_ind;
// NOTE(review): the four range checks below use `> LIMIT` rather than `>= LIMIT`.
// If the buffers are sized [N_SIGMAS]/[N_GPUS]/[N_OUTPUTS], an index equal to the
// limit slips through and indexes one past the end — confirm whether the limits
// are counts or maximum valid indices.
if(l < 0 || l > N_SIGMAS){
printf("invalid sigma index %i\n", l);
return NULL;
}
if(g < 0 || g > N_GPUS){
printf("invalid gpu index %i\n", g);
return NULL;
}
if(output_ind < 0 || output_ind > N_OUTPUTS){
printf("invalid output_ind %i\n", output_ind);
return NULL;
}
if(deriv_layer_ind < 0 || deriv_layer_ind > N_LAYERS){
printf("invalid deriv_layer_ind %i\n", deriv_layer_ind);
return NULL;
}
// Refuse to clobber a result that has not been collected yet.
if(sum_res_c[g][output_ind] != 0){
printf("output buffer used, call sigma_return first, for gpu %i, sigma_ind %i, output_ind: %i\n", gpu_ind, l, output_ind);
return NULL;
}
if(sigma31s_c[g][l] == 0){
printf("sigma buffer not initialized on gpu %i for layer %i\n", gpu_ind, l);
return NULL;
}
if(F1s_c[g] == 0){
printf("filter buffers not initialized on gpu %i\n", g);
return NULL;
}
hipSetDevice(gpu_ind); CHECK_CUDA_ERR
////////////////////////////////////////////////////////////////////////// which indices do we unravel across threads?
// Per-layer launch geometry: thread_sz.x enumerates the contracted (summed)
// dimensions, grid_sz enumerates the output dimensions.
// NOTE(review): if deriv_layer_ind passes the guard above but is > 4, none of
// these branches runs and output_sz/thread_sz/grid_sz stay uninitialized —
// safe only if N_LAYERS == 4; confirm.
unsigned long long output_sz;
dim3 thread_sz;
dim3 grid_sz;
if(deriv_layer_ind == 0){ // prediction (no deriv)
thread_sz.x = s1*s2*s2*s3;
//thread_sz.y = n0;
output_sz = N_C * N_C;
grid_sz.x = N_C;
grid_sz.y = N_C;
}else if(deriv_layer_ind == 1){ // F1 deriv
thread_sz.x = s2*s2*s3*s3;
output_sz = N_C * N_C * n1 * n0 * s1 * s1;
grid_sz.x = N_C * N_C * s1 * s1;
grid_sz.y = n1;
grid_sz.z = n0;
}else if(deriv_layer_ind == 2){ // F2 deriv
thread_sz.x = s1*s1*s3*s3;
output_sz = N_C * N_C * n2 * n1 * s2 * s2;
grid_sz.x = N_C * N_C * s2 * s2;
grid_sz.y = n2;
grid_sz.z = n1;
}else if(deriv_layer_ind == 3){ // F3 deriv
thread_sz.x = s1*s1*s2;//*s2;
output_sz = N_C * N_C * n3 * n2 * s3 * s3;
grid_sz.x = N_C * N_C * s3 * s3;
grid_sz.y = n3;
grid_sz.z = n2;
}else if(deriv_layer_ind == 4){ // FL deriv
thread_sz.x = s1*s1*s2;//*s2;
output_sz = N_C * n3 * max_output_sz3 * max_output_sz3;
grid_sz.x = N_C * max_output_sz3;
grid_sz.y = max_output_sz3;
grid_sz.z = n3;
}
// Remember which layer this output buffer corresponds to, for sigma_return.
deriv_layer_ind_res[g][output_ind] = deriv_layer_ind;
/////////////////////////////////// cuda mem
// Allocate the result buffer; non-NULL also marks the slot as "in use" (see
// the sum_res_c check above). Allocation failure is surfaced by CHECK_CUDA_ERR.
hipMalloc((void**) &sum_res_c[g][output_ind], output_sz * DATA_TYPE_SZ); CHECK_CUDA_ERR
// indexing products
// Precomputed row-major strides into the sigma31 tensor for this (gpu, sigma)
// pair; each name lists the dimensions whose sizes are multiplied together.
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0_n1s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l]*s1s[g][l]*n0s[g][l]*n1s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l]*s1s[g][l]*n0s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l]*s1s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3s = max_output_sz3s[g][l]*max_output_sz3s[g][l];
IND_DTYPE max_output_sz3s_local = max_output_sz3s[g][l];
IND_DTYPE z2b = 1;
// check which dims should be broadcasted
// Where a sigma dimension is smaller than the full model dimension (i.e. it was
// stored collapsed), zero the corresponding stride(s) so the kernel re-reads the
// same element along that axis (broadcast).
if(n1s[g][l] != n1){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s = 0;
}
if(n0s[g][l] != n0){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s = 0;
}
if(s1s[g][l] != s1){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s = 0;
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s = 0;
}
if(n2s[g][l] != n2){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s = 0;
}
if(s2s[g][l] != s2){
max_output_sz3_max_output_sz3_s3_s3_n3s = 0;
max_output_sz3_max_output_sz3_s3_s3_n3_s2s = 0;
}
if(s3s[g][l] != s3){
max_output_sz3_max_output_sz3s = 0;
max_output_sz3_max_output_sz3_s3s = 0;
}
if(n3s[g][l] != n3){
max_output_sz3_max_output_sz3_s3_s3s = 0;
}
if(max_output_sz3s[g][l] != max_output_sz3){
max_output_sz3s_local = 0;
z2b = 0;
}
//////////////////////////////////////////////////////////////////////////
// Launch with DATA_TYPE_SZ bytes of dynamic shared memory on the default
// stream; errors are picked up by the trailing CHECK_CUDA_ERR.
hipLaunchKernelGGL(( kernel_deriv) , dim3(grid_sz), dim3(thread_sz), DATA_TYPE_SZ , 0, sum_res_c[g][output_ind], sigma31s_c[g][l], F1s_c[g], F2s_c[g], F3s_c[g], FLs_c[g],
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0_n1s, max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s,
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s, max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s, max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s,
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s, max_output_sz3_max_output_sz3_s3_s3_n3_s2s, max_output_sz3_max_output_sz3_s3_s3_n3s, max_output_sz3_max_output_sz3_s3_s3s,
max_output_sz3_max_output_sz3_s3s, max_output_sz3_max_output_sz3s, z2b, n0, n0s[g][l], n1, n1s[g][l], n2, n2s[g][l], n3, n3s[g][l],
max_output_sz3, max_output_sz3s_local, s1, s1s[g][l], s2, s2s[g][l], s3, s3s[g][l], N_C, deriv_layer_ind);
CHECK_CUDA_ERR
Py_INCREF(Py_None);
return Py_None;
}
| ce10a0b685a38e0ef8e0bfc9d6622226b15061a1.cu | #include "einsum_kernel.cu"
static PyObject *einsum_deriv_gpu(PyObject *self, PyObject *args){
cudaError_t err;
int deriv_layer_ind; // which dimensions to sum over
int l; // sigma31 buffer ind
int output_ind; // buffer to store output
int gpu_ind;
if (!PyArg_ParseTuple(args, "iiii", &deriv_layer_ind, &l, &output_ind, &gpu_ind))
return NULL;
int g = gpu_ind;
if(l < 0 || l > N_SIGMAS){
printf("invalid sigma index %i\n", l);
return NULL;
}
if(g < 0 || g > N_GPUS){
printf("invalid gpu index %i\n", g);
return NULL;
}
if(output_ind < 0 || output_ind > N_OUTPUTS){
printf("invalid output_ind %i\n", output_ind);
return NULL;
}
if(deriv_layer_ind < 0 || deriv_layer_ind > N_LAYERS){
printf("invalid deriv_layer_ind %i\n", deriv_layer_ind);
return NULL;
}
if(sum_res_c[g][output_ind] != 0){
printf("output buffer used, call sigma_return first, for gpu %i, sigma_ind %i, output_ind: %i\n", gpu_ind, l, output_ind);
return NULL;
}
if(sigma31s_c[g][l] == 0){
printf("sigma buffer not initialized on gpu %i for layer %i\n", gpu_ind, l);
return NULL;
}
if(F1s_c[g] == 0){
printf("filter buffers not initialized on gpu %i\n", g);
return NULL;
}
cudaSetDevice(gpu_ind); CHECK_CUDA_ERR
////////////////////////////////////////////////////////////////////////// which indices do we unravel across threads?
unsigned long long output_sz;
dim3 thread_sz;
dim3 grid_sz;
if(deriv_layer_ind == 0){ // prediction (no deriv)
thread_sz.x = s1*s2*s2*s3;
//thread_sz.y = n0;
output_sz = N_C * N_C;
grid_sz.x = N_C;
grid_sz.y = N_C;
}else if(deriv_layer_ind == 1){ // F1 deriv
thread_sz.x = s2*s2*s3*s3;
output_sz = N_C * N_C * n1 * n0 * s1 * s1;
grid_sz.x = N_C * N_C * s1 * s1;
grid_sz.y = n1;
grid_sz.z = n0;
}else if(deriv_layer_ind == 2){ // F2 deriv
thread_sz.x = s1*s1*s3*s3;
output_sz = N_C * N_C * n2 * n1 * s2 * s2;
grid_sz.x = N_C * N_C * s2 * s2;
grid_sz.y = n2;
grid_sz.z = n1;
}else if(deriv_layer_ind == 3){ // F3 deriv
thread_sz.x = s1*s1*s2;//*s2;
output_sz = N_C * N_C * n3 * n2 * s3 * s3;
grid_sz.x = N_C * N_C * s3 * s3;
grid_sz.y = n3;
grid_sz.z = n2;
}else if(deriv_layer_ind == 4){ // FL deriv
thread_sz.x = s1*s1*s2;//*s2;
output_sz = N_C * n3 * max_output_sz3 * max_output_sz3;
grid_sz.x = N_C * max_output_sz3;
grid_sz.y = max_output_sz3;
grid_sz.z = n3;
}
deriv_layer_ind_res[g][output_ind] = deriv_layer_ind;
/////////////////////////////////// cuda mem
cudaMalloc((void**) &sum_res_c[g][output_ind], output_sz * DATA_TYPE_SZ); CHECK_CUDA_ERR
// indexing products
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0_n1s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l]*s1s[g][l]*n0s[g][l]*n1s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l]*s1s[g][l]*n0s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l]*s1s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l]*s1s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l]*n2s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l]*s2s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3_s2s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l]*s2s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3_n3s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l]*n3s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3_s3s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l]*s3s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3_s3s = max_output_sz3s[g][l]*max_output_sz3s[g][l]*s3s[g][l];
IND_DTYPE max_output_sz3_max_output_sz3s = max_output_sz3s[g][l]*max_output_sz3s[g][l];
IND_DTYPE max_output_sz3s_local = max_output_sz3s[g][l];
IND_DTYPE z2b = 1;
// check which dims should be broadcasted
if(n1s[g][l] != n1){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s = 0;
}
if(n0s[g][l] != n0){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s = 0;
}
if(s1s[g][l] != s1){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s = 0;
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s = 0;
}
if(n2s[g][l] != n2){
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s = 0;
}
if(s2s[g][l] != s2){
max_output_sz3_max_output_sz3_s3_s3_n3s = 0;
max_output_sz3_max_output_sz3_s3_s3_n3_s2s = 0;
}
if(s3s[g][l] != s3){
max_output_sz3_max_output_sz3s = 0;
max_output_sz3_max_output_sz3_s3s = 0;
}
if(n3s[g][l] != n3){
max_output_sz3_max_output_sz3_s3_s3s = 0;
}
if(max_output_sz3s[g][l] != max_output_sz3){
max_output_sz3s_local = 0;
z2b = 0;
}
//////////////////////////////////////////////////////////////////////////
kernel_deriv <<< grid_sz, thread_sz, DATA_TYPE_SZ >>> (sum_res_c[g][output_ind], sigma31s_c[g][l], F1s_c[g], F2s_c[g], F3s_c[g], FLs_c[g],
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0_n1s, max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1_n0s,
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1_s1s, max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2_s1s, max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2_n2s,
max_output_sz3_max_output_sz3_s3_s3_n3_s2_s2s, max_output_sz3_max_output_sz3_s3_s3_n3_s2s, max_output_sz3_max_output_sz3_s3_s3_n3s, max_output_sz3_max_output_sz3_s3_s3s,
max_output_sz3_max_output_sz3_s3s, max_output_sz3_max_output_sz3s, z2b, n0, n0s[g][l], n1, n1s[g][l], n2, n2s[g][l], n3, n3s[g][l],
max_output_sz3, max_output_sz3s_local, s1, s1s[g][l], s2, s2s[g][l], s3, s3s[g][l], N_C, deriv_layer_ind);
CHECK_CUDA_ERR
Py_INCREF(Py_None);
return Py_None;
}
|
25b18af59b05ca9ce66850c6098866a367fc50f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/pipeline/operators/transpose/cutt/cuttkernel.h"
#include <iostream>
#include "dali/util/dynlink_cuda.h"
#include "dali/util/cuda_utils.h"
#include "dali/pipeline/operators/transpose/cutt/CudaUtils.h"
#include "dali/pipeline/operators/transpose/cutt/LRUCache.h"
#define RESTRICT __restrict__
//
// Transpose when Mm and Mk don't overlap and contain only single rank
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar);
//
template <typename T>
__global__ void transposeTiled(
const int numMm, const int volMbar, const int sizeMbar,
const int2 tiledVol, const int cuDimMk, const int cuDimMm,
const TensorConvInOut* RESTRICT glMbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory
// +1 padding on the inner dimension staggers rows across shared-memory
// banks so the transposed (column-wise) reads below avoid bank conflicts.
__shared__ T shTile[TILEDIM][TILEDIM+1];
const int warpLane = threadIdx.x & (warpSize - 1);
// Each of the first sizeMbar lanes loads one entry of the Mbar index
// conversion table; all other lanes keep the identity values set here.
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = glMbar[warpLane];
}
// Tile origin: blockIdx.x enumerates tiles row-major over a grid that is
// numMm tiles wide (see the numblock computation in the launch config).
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
// Input is read at (xin, yin); output is written at the transposed
// coordinates (xout, yout).
const int xin = bx + threadIdx.x;
const int yin = by + threadIdx.y;
const int xout = bx + threadIdx.y;
const int yout = by + threadIdx.x;
// Per-lane bounds masks: the ballot packs the row-in-range predicate of
// every lane into a bit mask, and multiplying by the column predicate
// (0 or 1) zeroes the whole mask when this lane's column is out of range.
// NOTE(review): testing bit j in the loops below assumes TILEDIM equals
// the warp size so lane k corresponds to row offset k — confirm TILEDIM.
const unsigned int maskIny = __ballot_sync(FULL_MASK, (yin + warpLane < tiledVol.y))*(xin < tiledVol.x);
const unsigned int maskOutx = __ballot_sync(FULL_MASK, (xout + warpLane < tiledVol.x))*(yout < tiledVol.y);
const int posMinorIn = xin + yin*cuDimMk;
const int posMinorOut = yout + xout*cuDimMm;
const int posInAdd = TILEROWS*cuDimMk;
const int posOutAdd = TILEROWS*cuDimMm;
// Blocks stride over the untouched ("bar") volume in the z dimension.
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Compute global memory positions
// Each lane computes the contribution of its own Mbar rank ...
int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
// ... and the xor-shuffle butterfly sums all lanes' contributions,
// leaving the full major position replicated in every lane.
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMajorIn += __shfl_xor_sync(FULL_MASK, posMajorIn, i);
posMajorOut += __shfl_xor_sync(FULL_MASK, posMajorOut, i);
}
int posIn = posMajorIn + posMinorIn;
int posOut = posMajorOut + posMinorOut;
// Read from global memory
// Barrier: make sure the previous iteration's tile reads are done
// before overwriting the shared tile.
__syncthreads();
// Read data into shared memory tile
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// int pos = posIn + j*cuDimMk;
// if (xin < readVol.x && yin + j < readVol.y) {
if ((maskIny & (1 << j)) != 0) {
shTile[threadIdx.y + j][threadIdx.x] = dataIn[posIn];
}
posIn += posInAdd;
}
// Write to global memory
// Barrier: the tile must be fully populated before transposed reads.
__syncthreads();
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// int pos = posOut + j*cuDimMm;
// if (xout + j < readVol.x && yout < readVol.y) {
if ((maskOutx & (1 << j)) != 0 ) {
dataOut[posOut] = shTile[threadIdx.x][threadIdx.y + j];
}
posOut += posOutAdd;
}
}
}
//
// Packed transpose. Thread block loads plan.volMmk number of elements
//
template <typename T, int numRegStorage>
__global__ void transposePacked(
const int volMmk, const int volMbar,
const int sizeMmk, const int sizeMbar,
const TensorConvInOut* RESTRICT gl_Mmk,
const TensorConvInOut* RESTRICT gl_Mbar,
const TensorConv* RESTRICT gl_Msh,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory. volMmk elements
// Dynamic shared memory is declared as char[] and cast so one kernel
// instantiation works for any element type T.
extern __shared__ char shBuffer_char[];
T* shBuffer = (T *)shBuffer_char;
const int warpLane = threadIdx.x & (warpSize - 1);
// The Mmk index-conversion table is distributed across warp lanes:
// lane i holds rank i (identity values elsewhere); entries are later
// broadcast with __shfl_sync(..., i).
TensorConvInOut Mmk;
Mmk.c_in = 1;
Mmk.d_in = 1;
Mmk.c_out = 1;
Mmk.d_out = 1;
if (warpLane < sizeMmk) {
Mmk = gl_Mmk[warpLane];
}
// Shared-memory position conversion table, distributed the same way.
TensorConv Msh;
Msh.c = 1;
Msh.d = 1;
if (warpLane < sizeMmk) {
Msh = gl_Msh[warpLane];
}
// Pre-compute tensor positions in Mmk
// 3*numRegStorage registers
// Each thread handles numRegStorage elements (posMmk = threadIdx.x +
// j*blockDim.x); per-element input/output/shared offsets are accumulated
// rank by rank before the main loop.
int posMmkIn[numRegStorage];
int posMmkOut[numRegStorage];
int posSh[numRegStorage];
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
posMmkIn[j] = 0;
posMmkOut[j] = 0;
posSh[j] = 0;
}
for (int i=0;i < sizeMmk;i++) {
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
posMmkIn[j] += ((posMmk / __shfl_sync(FULL_MASK, Mmk.c_in,i)) % __shfl_sync(FULL_MASK, Mmk.d_in,i))*__shfl_sync(FULL_MASK, Mmk.ct_in,i);
posMmkOut[j] += ((posMmk / __shfl_sync(FULL_MASK, Mmk.c_out,i)) % __shfl_sync(FULL_MASK, Mmk.d_out,i))*__shfl_sync(FULL_MASK, Mmk.ct_out,i);
posSh[j] += ((posMmk / __shfl_sync(FULL_MASK, Msh.c,i)) % __shfl_sync(FULL_MASK, Msh.d,i))*__shfl_sync(FULL_MASK, Msh.ct,i);
}
}
// 6 registers
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
// Blocks stride over the untouched ("bar") volume.
for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x)
{
// Per-lane rank contribution, then xor-shuffle butterfly sum so every
// lane ends up with the full bar offset.
int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarOut += __shfl_xor_sync(FULL_MASK, posMbarOut, i);
}
int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarIn += __shfl_xor_sync(FULL_MASK, posMbarIn, i);
}
// Barrier: previous iteration's shared-memory reads must finish first.
__syncthreads();
// Read from global memory
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posIn = posMbarIn + posMmkIn[j];
if (posMmk < volMmk) shBuffer[posMmk] = dataIn[posIn];
}
// Barrier: the whole Mmk volume must be staged before permuted reads.
__syncthreads();
// Write to global memory
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posOut = posMbarOut + posMmkOut[j];
if (posMmk < volMmk) dataOut[posOut] = shBuffer[posSh[j]];
}
}
}
//
// Packed method with a split rank
//
// dim nthread(((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1)
// dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1)
//
template <typename T, int numRegStorage>
__global__ void transposePackedSplit(
const int splitDim, const int volMmkUnsplit, const int volMbar,
const int sizeMmk, const int sizeMbar,
const int cMmSplit, const int cMkSplit,
const TensorConvInOut* RESTRICT glMmk,
const TensorConvInOut* RESTRICT glMbar,
const TensorConv* RESTRICT glMsh,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
// Shared memory. max(volSplit)*volMmkUnsplit T elements
extern __shared__ char shBuffer_char[];
T* shBuffer = (T *)shBuffer_char;
const int warpLane = threadIdx.x & (warpSize - 1);
// const int plusone = (blockIdx.x < (splitDim % gridDim.x));
// p0/volSplit partition splitDim across the gridDim.x splits; plusone is
// 1 for splits that got one extra element (splitDim not divisible), and
// selects which of the two precomputed conversion tables to use.
const int p0 = blockIdx.x*splitDim/gridDim.x;
const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0;
const int plusone = volSplit - splitDim/gridDim.x;
// Conversion tables distributed across warp lanes (lane i holds rank i);
// read back later via __shfl_sync broadcasts.
TensorConvInOut Mmk;
Mmk.c_in = 1;
Mmk.d_in = 1;
Mmk.c_out = 1;
Mmk.d_out = 1;
if (warpLane < sizeMmk) {
Mmk = glMmk[warpLane + plusone*sizeMmk];
}
TensorConv Msh;
Msh.c = 1;
Msh.d = 1;
if (warpLane < sizeMmk) {
Msh = glMsh[warpLane + plusone*sizeMmk];
}
// gridDim.x = number of splits
// blockIdx.x = {0 ... gridDim.x - 1} is the split-index
// Volume of this split
// const int volSplit = (splitDim/gridDim.x) + plusone;
// Start position in this split
// const int p0 = (splitDim/gridDim.x)*blockIdx.x + min(blockIdx.x, (splitDim % gridDim.x));
const int posMmkIn0 = p0*cMmSplit;
const int posMmkOut0 = p0*cMkSplit;
// Volume of split Mmk
const int volMmkSplit = volSplit*volMmkUnsplit;
// Pre-compute tensor positions in Mmk
// 3*numRegStorage registers
// Each thread handles numRegStorage elements; offsets start from this
// split's base position and accumulate one term per rank.
int posMmkIn[numRegStorage];
int posMmkOut[numRegStorage];
int posSh[numRegStorage];
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
posMmkIn[j] = posMmkIn0;
posMmkOut[j] = posMmkOut0;
posSh[j] = 0;
}
for (int i=0;i < sizeMmk;i++) {
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int t = threadIdx.x + j*blockDim.x;
posMmkIn[j] += ((t/__shfl_sync(FULL_MASK, Mmk.c_in,i)) % __shfl_sync(FULL_MASK, Mmk.d_in,i))*__shfl_sync(FULL_MASK, Mmk.ct_in,i);
posMmkOut[j] += ((t/__shfl_sync(FULL_MASK, Mmk.c_out,i)) % __shfl_sync(FULL_MASK, Mmk.d_out,i))*__shfl_sync(FULL_MASK, Mmk.ct_out,i);
posSh[j] += ((t/__shfl_sync(FULL_MASK, Msh.c,i)) % __shfl_sync(FULL_MASK, Msh.d,i))*__shfl_sync(FULL_MASK, Msh.ct,i);
}
}
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = glMbar[warpLane];
}
// Each blockIdx.y handles a contiguous slice [posMbar0, posMbar1) of the
// bar volume (contiguous partitioning, unlike the strided loop below).
const int posMbar0 = blockIdx.y*volMbar/gridDim.y;
const int posMbar1 = (blockIdx.y + 1)*volMbar/gridDim.y;
for (int posMbar=posMbar0;posMbar < posMbar1;posMbar++)
// for (int posMbar=blockIdx.y;posMbar < volMbar;posMbar+=gridDim.y)
{
// Per-lane rank contribution followed by an xor-shuffle butterfly sum;
// every lane ends with the full bar offset.
int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarOut += __shfl_xor_sync(FULL_MASK, posMbarOut, i);
}
int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMbarIn += __shfl_xor_sync(FULL_MASK, posMbarIn, i);
}
// Read from global memory
// Barrier: previous iteration's shared-memory reads must be done.
__syncthreads();
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posIn = posMbarIn + posMmkIn[j];
if (posMmk < volMmkSplit) shBuffer[posMmk] = dataIn[posIn];
}
// Write to global memory
// Barrier: staging must be complete before permuted reads.
__syncthreads();
#pragma unroll
for (int j=0;j < numRegStorage;j++) {
int posMmk = threadIdx.x + j*blockDim.x;
int posOut = posMbarOut + posMmkOut[j];
if (posMmk < volMmkSplit) dataOut[posOut] = shBuffer[posSh[j]];
}
}
}
//
// Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2)
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar);
//
template <typename T>
__global__ void transposeTiledCopy(
const int numMm, const int volMbar, const int sizeMbar,
const int cuDimMk, const int cuDimMm,
const int2 tiledVol,
const TensorConvInOut* RESTRICT gl_Mbar,
const T* RESTRICT dataIn, T* RESTRICT dataOut) {
const int warpLane = threadIdx.x & (warpSize - 1);
// First sizeMbar lanes each hold one rank of the Mbar conversion table;
// other lanes keep the identity values. No shared memory is needed here
// because the minor coordinates are not transposed (lead dim unchanged).
TensorConvInOut Mbar;
Mbar.c_in = 1;
Mbar.d_in = 1;
Mbar.c_out = 1;
Mbar.d_out = 1;
if (warpLane < sizeMbar) {
Mbar = gl_Mbar[warpLane];
}
// Tile origin; blockIdx.x enumerates tiles over a grid numMm tiles wide.
const int bx = (blockIdx.x % numMm)*TILEDIM;
const int by = (blockIdx.x / numMm)*TILEDIM;
const int x = bx + threadIdx.x;
const int y = by + threadIdx.y;
// Bounds mask: ballot packs each lane's row predicate into a bit mask,
// zeroed entirely when this lane's column is out of range.
// NOTE(review): bit-j testing below assumes TILEDIM == warpSize — confirm.
const unsigned int mask = __ballot_sync(FULL_MASK, (y + warpLane < tiledVol.y))*(x < tiledVol.x);
const int posMinorIn = x + y*cuDimMk;
const int posMinorOut = x + y*cuDimMm;
const int posInAdd = TILEROWS*cuDimMk;
const int posOutAdd = TILEROWS*cuDimMm;
// Blocks stride over the untouched ("bar") volume in z.
for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
{
// Compute global memory positions
// Per-lane rank term, then xor-shuffle butterfly sum across the warp.
int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
for (int i=16;i >= 1;i/=2) {
posMajorIn += __shfl_xor_sync(FULL_MASK, posMajorIn, i);
posMajorOut += __shfl_xor_sync(FULL_MASK, posMajorOut, i);
}
int posIn = posMajorIn + posMinorIn;
int posOut = posMajorOut + posMinorOut;
// Variables where values are stored
// Tile rows are held in registers between the read and write passes.
T val[TILEDIM/TILEROWS];
// Read global memory
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
if ((mask & (1 << j)) != 0) {
val[j/TILEROWS] = dataIn[posIn];
}
posIn += posInAdd;
}
// Write global memory
#pragma unroll
for (int j=0;j < TILEDIM;j += TILEROWS) {
// if ((x < tiledVol.x) && (y + j < tiledVol.y)) {
if ((mask & (1 << j)) != 0) {
dataOut[posOut] = val[j/TILEROWS];
}
posOut += posOutAdd;
}
}
}
//######################################################################################
//######################################################################################
//######################################################################################
//
// Sets shared memory bank configuration for all kernels. Needs to be called once per device.
//
void cuttKernelSetSharedMemConfig() {
// Each CALL/#include "calls.h" pair expands one hipFuncSetSharedMemConfig
// call per supported numRegStorage template instantiation; "calls.h"
// invokes CALL(NREG) once for every supported register-storage count.
// 4-byte banks for float kernels, 8-byte banks for double kernels.
#define CALL(NREG) CUDA_CALL(hipFuncSetSharedMemConfig(transposePacked<float, NREG>, hipSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) CUDA_CALL(hipFuncSetSharedMemConfig(transposePacked<double, NREG>, hipSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) CUDA_CALL(hipFuncSetSharedMemConfig(transposePackedSplit<float, NREG>, hipSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL
#define CALL(NREG) CUDA_CALL(hipFuncSetSharedMemConfig(transposePackedSplit<double, NREG>, hipSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL
// Tiled kernels have no register-storage template parameter; configure
// them directly for each element width.
CUDA_CALL(hipFuncSetSharedMemConfig(transposeTiled<float>, hipSharedMemBankSizeFourByte));
CUDA_CALL(hipFuncSetSharedMemConfig(transposeTiledCopy<float>, hipSharedMemBankSizeFourByte));
CUDA_CALL(hipFuncSetSharedMemConfig(transposeTiled<double>, hipSharedMemBankSizeEightByte));
CUDA_CALL(hipFuncSetSharedMemConfig(transposeTiledCopy<double>, hipSharedMemBankSizeEightByte));
}
// Caches for PackedSplit kernels. One cache for all devices
// NOTE: Not thread safe
// Maximum number of entries kept in the occupancy cache.
const int CACHE_SIZE = 100000;
// Key-space bounds used when packing the cache key in getNumActiveBlock().
const int MAX_NUMWARP = (1024/32);
const int MAX_NUMTYPE = 2;
// Lazily initialized device count (-1 = not yet queried).
static int numDevices = -1;
// Maps packed (shmem, device, type, reg-storage, warp-count) keys to the
// occupancy result; -1 is the "not found" sentinel.
LRUCache<unsigned long long int, int> nabCache(CACHE_SIZE, -1);
//
// Returns the maximum number of active blocks per SM
//
int getNumActiveBlock(const int method, const int sizeofType, const LaunchConfig& lc,
const int deviceID, const hipDeviceProp_t& prop) {
int numActiveBlock = -1; // default init to silent warnings
// Total threads per block for the occupancy query.
int numthread = lc.numthread.x * lc.numthread.y * lc.numthread.z;
switch(method) {
case Trivial:
{
// This value does not matter, but should be > 0
numActiveBlock = 1;
}
break;
case Packed:
{
// CALL0 performs the occupancy query for one (type, NREG) kernel
// instantiation; "calls.h" expands CALL(ICASE) into one switch case
// per supported numRegStorage value.
#define CALL0(TYPE, NREG) \
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
transposePacked<TYPE, NREG>, numthread, lc.shmemsize)
switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 1) CALL0(char, ICASE); \
if (sizeofType == 2) CALL0(short, ICASE); \
if (sizeofType == 4) CALL0(float, ICASE); \
if (sizeofType == 8) CALL0(double, ICASE); \
break
#include "calls.h"
}
#undef CALL
#undef CALL0
}
break;
case PackedSplit:
{
// Allocate cache structure if needed
if (numDevices == -1) {
CUDA_CALL(hipGetDeviceCount(&numDevices));
}
// Build unique key for cache
// Key components are packed positionally: warp count (least
// significant), reg storage, element type, device, shared-mem size.
int key_warp = (numthread/prop.warpSize - 1);
if (key_warp >= MAX_NUMWARP) {
printf("getNumActiveBlock maximum number of warps exceeded\n");
exit(1);
}
int key_reg = (lc.numRegStorage - 1);
// NOTE(review): key_type only distinguishes 4-byte from other sizes,
// so e.g. 1- and 8-byte types share a key slot — confirm intended.
int key_type = (sizeofType == 4);
unsigned long long int key =
(unsigned long long int)(lc.shmemsize/sizeofType)*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE*numDevices +
(unsigned long long int)deviceID*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE +
(unsigned long long int)key_type*MAX_NUMWARP*MAX_REG_STORAGE +
(unsigned long long int)key_reg*MAX_NUMWARP +
(unsigned long long int)key_warp;
numActiveBlock = nabCache.get(key);
if (numActiveBlock == -1) {
// key not found in cache, determine value and add it to cache
#define CALL0(TYPE, NREG) \
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
transposePackedSplit<TYPE, NREG>, numthread, lc.shmemsize)
switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 1) CALL0(char, ICASE); \
if (sizeofType == 2) CALL0(short, ICASE); \
if (sizeofType == 4) CALL0(float, ICASE); \
if (sizeofType == 8) CALL0(double, ICASE); \
break
#include "calls.h"
}
#undef CALL
#undef CALL0
nabCache.set(key, numActiveBlock);
}
}
break;
case Tiled:
{
// Tiled kernels are not templated on numRegStorage; query per type.
if (sizeofType == 1) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<char>, numthread, lc.shmemsize);
} else if (sizeofType == 2) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<short>, numthread, lc.shmemsize);
} else if (sizeofType == 4) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<float>, numthread, lc.shmemsize);
} else {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiled<double>, numthread, lc.shmemsize);
}
}
break;
case TiledCopy:
{
if (sizeofType == 1) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<char>, numthread, lc.shmemsize);
} else if (sizeofType == 2) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<short>, numthread, lc.shmemsize);
} else if (sizeofType == 4) {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<float>, numthread, lc.shmemsize);
} else {
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
transposeTiledCopy<double>, numthread, lc.shmemsize);
}
}
break;
}
return numActiveBlock;
}
//
// Sets up kernel launch configuration
//
// Returns the number of active blocks per SM that can be achieved on the Packed kernel
// NOTE: Returns 0 when kernel execution is not possible
//
// Sets:
// lc.numthread
// lc.numblock
// lc.shmemsize
// lc.numRegStorage (for Packed method)
//
// Sets up the kernel launch configuration (lc.numthread, lc.numblock,
// lc.shmemsize, lc.numRegStorage) for the transpose method chosen in 'ts'.
// Returns the number of active blocks per SM achievable with that
// configuration, or 0 when the kernel cannot be executed (too much shared
// memory, too many threads, or grid limits exceeded).
int cuttKernelLaunchConfiguration(const int sizeofType, const TensorSplit& ts,
const int deviceID, const hipDeviceProp_t& prop, LaunchConfig& lc) {
// Return value of numActiveBlock
int numActiveBlockReturn = -1;
switch(ts.method) {
case Trivial:
{
// These values don't matter (the Trivial method is a plain memcpy).
// Fix: the original assigned lc.numblock.z = 1 three times; the
// redundant duplicate assignments have been removed.
lc.numthread.x = 1;
lc.numthread.y = 1;
lc.numthread.z = 1;
lc.numblock.x = 1;
lc.numblock.y = 1;
lc.numblock.z = 1;
lc.shmemsize = 0;
lc.numRegStorage = 0;
}
break;
case Packed:
{
// Amount of shared memory required
lc.shmemsize = ts.shmemAlloc(sizeofType); //ts.volMmk*sizeofType;
// Check that we're not using too much shared memory per block
if (lc.shmemsize > prop.sharedMemPerBlock) {
// printf("lc.shmemsize %d prop.sharedMemPerBlock %d\n", lc.shmemsize, prop.sharedMemPerBlock);
return 0;
}
// Min and max number of threads we can use
int minNumthread = ((ts.volMmk - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
int maxNumthread = ((ts.volMmk - 1)/(prop.warpSize) + 1)*prop.warpSize;
if (minNumthread > prop.maxThreadsPerBlock) return 0;
maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);
// Min and max number of register storage we can use
int minNumRegStorage = (ts.volMmk - 1)/maxNumthread + 1;
int maxNumRegStorage = (ts.volMmk - 1)/minNumthread + 1;
// Scan the feasible register-storage range and keep the value that
// maximizes occupancy-weighted useful volume.
int bestVal = 0;
int bestNumRegStorage = 0;
int bestNumActiveBlock = 0;
lc.numthread.y = 1;
lc.numthread.z = 1;
lc.numblock.x = max(1, ts.volMbar);
lc.numblock.x = min(prop.multiProcessorCount*18, lc.numblock.x);
lc.numblock.y = 1;
lc.numblock.z = 1;
for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
int val = ts.volMmkUsed()*numActiveBlock;
if (val > bestVal) {
bestVal = val;
bestNumRegStorage = lc.numRegStorage;
bestNumActiveBlock = numActiveBlock;
}
}
if (bestNumRegStorage == 0) return 0;
lc.numRegStorage = bestNumRegStorage;
lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
numActiveBlockReturn = bestNumActiveBlock;
}
break;
case PackedSplit:
{
// Amount of shared memory required
lc.shmemsize = ts.shmemAlloc(sizeofType);
// Check that we're not using too much shared memory per block
if (lc.shmemsize > prop.sharedMemPerBlock) {
return 0;
}
// Largest per-split Mmk volume (splits may differ by one when
// splitDim is not divisible by numSplit).
int volMmkWithSplit = (ts.splitDim/ts.numSplit + ((ts.splitDim % ts.numSplit) > 0))*ts.volMmkUnsplit;
// Min and max number of threads we can use
int minNumthread = ((volMmkWithSplit - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
int maxNumthread = ((volMmkWithSplit - 1)/(prop.warpSize) + 1)*prop.warpSize;
if (minNumthread > prop.maxThreadsPerBlock) return 0;
maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);
// Min and max number of register storage we can use
int minNumRegStorage = (volMmkWithSplit - 1)/maxNumthread + 1;
int maxNumRegStorage = (volMmkWithSplit - 1)/minNumthread + 1;
// Same register-storage search as the Packed case.
int bestVal = 0;
int bestNumRegStorage = 0;
int bestNumActiveBlock = 0;
lc.numthread.y = 1;
lc.numthread.z = 1;
lc.numblock.x = ts.numSplit;
lc.numblock.y = max(1, min((prop.multiProcessorCount*18)/lc.numblock.x, ts.volMbar));
lc.numblock.z = 1;
for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
int val = ts.volMmkUsed()*numActiveBlock;
if (val > bestVal) {
bestVal = val;
bestNumRegStorage = lc.numRegStorage;
bestNumActiveBlock = numActiveBlock;
}
}
if (bestNumRegStorage == 0) return 0;
lc.numRegStorage = bestNumRegStorage;
lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
numActiveBlockReturn = bestNumActiveBlock;
}
break;
case Tiled:
{
// One TILEDIM x TILEROWS block per tile; z strides over the bar volume.
lc.numthread.x = TILEDIM;
lc.numthread.y = TILEROWS;
lc.numthread.z = 1;
lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMk - 1)/TILEDIM + 1);
lc.numblock.y = 1;
lc.numblock.z = max(1, min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), ts.volMbar));
lc.shmemsize = 0;
lc.numRegStorage = 0;
}
break;
case TiledCopy:
{
lc.numthread.x = TILEDIM;
lc.numthread.y = TILEROWS;
lc.numthread.z = 1;
lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMkBar - 1)/TILEDIM + 1);
lc.numblock.y = 1;
lc.numblock.z = ts.volMbar;
lc.numblock.z = min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), lc.numblock.z);
lc.numblock.z = max(1, lc.numblock.z);
lc.shmemsize = 0;
lc.numRegStorage = 0;
}
break;
}
// Reject configurations that exceed the device's grid-dimension limits.
if (lc.numblock.x > static_cast<unsigned int>(prop.maxGridSize[0]) ||
lc.numblock.y > static_cast<unsigned int>(prop.maxGridSize[1]) ||
lc.numblock.z > static_cast<unsigned int>(prop.maxGridSize[2])) return 0;
// Return the number of active blocks with these settings
if (numActiveBlockReturn == -1) {
// Not set, get it
numActiveBlockReturn = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
}
return numActiveBlockReturn;
}
// Executes the transpose described by 'plan' on plan.stream, dispatching to
// the kernel selected by plan.tensorSplit.method and instantiated for
// plan.sizeofType. Returns false when no template instantiation exists for
// the requested numRegStorage; CUDA_CALL aborts on launch errors.
bool cuttKernel(cuttPlan_t& plan, const void* dataIn, void* dataOut) {
LaunchConfig& lc = plan.launchConfig;
TensorSplit& ts = plan.tensorSplit;
switch(ts.method) {
case Trivial:
{
// Permutation is the identity: a plain async device-to-device copy.
CUDA_CALL(hipMemcpyAsync(dataOut, dataIn, ts.volMmk*ts.volMbar*plan.sizeofType,
hipMemcpyDeviceToDevice, plan.stream));
}
break;
case Packed:
{
// "calls.h" expands CALL(ICASE) into one case per supported
// numRegStorage value; each case launches the matching (type, NREG)
// transposePacked instantiation.
switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
hipLaunchKernelGGL(( transposePacked<TYPE, NREG>) , dim3(lc.numblock), dim3(lc.numthread), lc.shmemsize, plan.stream , \
ts.volMmk, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \
plan.Mmk, plan.Mbar, plan.Msh, (const TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 1) CALL0(char, ICASE); \
if (plan.sizeofType == 2) CALL0(short, ICASE); \
if (plan.sizeofType == 4) CALL0(float, ICASE); \
if (plan.sizeofType == 8) CALL0(double, ICASE); \
break
#include "calls.h"
default:
printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
return false;
#undef CALL
#undef CALL0
}
}
break;
case PackedSplit:
{
// Same macro-expansion dispatch as Packed, for transposePackedSplit.
switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
hipLaunchKernelGGL(( transposePackedSplit<TYPE, NREG>) , dim3(lc.numblock), dim3(lc.numthread), lc.shmemsize, plan.stream , \
ts.splitDim, ts.volMmkUnsplit, ts. volMbar, ts.sizeMmk, ts.sizeMbar, \
plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, plan.Msh, (const TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 1) CALL0(char, ICASE); \
if (plan.sizeofType == 2) CALL0(short, ICASE); \
if (plan.sizeofType == 4) CALL0(float, ICASE); \
if (plan.sizeofType == 8) CALL0(double, ICASE); \
break
#include "calls.h"
default:
printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
return false;
#undef CALL
#undef CALL0
}
}
break;
case Tiled:
{
// Tiled kernels carry no numRegStorage parameter; dispatch on type only.
#define CALL(TYPE) \
hipLaunchKernelGGL(( transposeTiled<TYPE>) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , \
((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, \
plan.Mbar, (const TYPE *)dataIn, (TYPE *)dataOut)
if (plan.sizeofType == 1) CALL(char);
if (plan.sizeofType == 2) CALL(short);
if (plan.sizeofType == 4) CALL(float);
if (plan.sizeofType == 8) CALL(double);
#undef CALL
}
break;
case TiledCopy:
{
#define CALL(TYPE) \
hipLaunchKernelGGL(( transposeTiledCopy<TYPE>) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , \
((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, \
plan.Mbar, (const TYPE *)dataIn, (TYPE *)dataOut)
if (plan.sizeofType == 1) CALL(char);
if (plan.sizeofType == 2) CALL(short);
if (plan.sizeofType == 4) CALL(float);
if (plan.sizeofType == 8) CALL(double);
#undef CALL
}
break;
}
// Surface any launch-configuration error from the kernel launches above.
CUDA_CALL(hipGetLastError());
return true;
}
| 25b18af59b05ca9ce66850c6098866a367fc50f3.cu | /******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/pipeline/operators/transpose/cutt/cuttkernel.h"
#include <iostream>
#include "dali/util/dynlink_cuda.h"
#include "dali/util/cuda_utils.h"
#include "dali/pipeline/operators/transpose/cutt/CudaUtils.h"
#include "dali/pipeline/operators/transpose/cutt/LRUCache.h"
#define RESTRICT __restrict__
//
// Transpose when Mm and Mk don't overlap and contain only single rank
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar);
//
// Tiled transpose kernel used when the Mm and Mk rank sets do not overlap and
// each contains a single rank. A TILEDIM x TILEDIM tile is staged through
// shared memory so that both the global read and the global write are
// contiguous in their respective layouts.
//
// Expected launch configuration (see comment block above / host code):
//   numthread = (TILEDIM, TILEROWS, 1)
//   numblock  = (tilesAlongMm * tilesAlongMk, 1, <=volMbar), grid-stride over z.
template <typename T>
__global__ void transposeTiled(
  const int numMm, const int volMbar, const int sizeMbar,
  const int2 tiledVol, const int cuDimMk, const int cuDimMm,
  const TensorConvInOut* RESTRICT glMbar,
  const T* RESTRICT dataIn, T* RESTRICT dataOut) {

  // Shared memory tile; the +1 column pads away shared-memory bank conflicts
  // on the transposed (column-wise) read-back.
  __shared__ T shTile[TILEDIM][TILEDIM+1];

  const int warpLane = threadIdx.x & (warpSize - 1);
  // Each warp lane caches the conversion entry of one Mbar rank. Lanes beyond
  // sizeMbar keep c=d=1, which makes their term ((p/1)%1)*ct == 0 below.
  TensorConvInOut Mbar;
  Mbar.c_in = 1;
  Mbar.d_in = 1;
  Mbar.c_out = 1;
  Mbar.d_out = 1;
  if (warpLane < sizeMbar) {
    Mbar = glMbar[warpLane];
  }

  // Tile origin: blockIdx.x enumerates tiles; numMm = number of tiles along Mm.
  const int bx = (blockIdx.x % numMm)*TILEDIM;
  const int by = (blockIdx.x / numMm)*TILEDIM;

  const int xin = bx + threadIdx.x;
  const int yin = by + threadIdx.y;

  const int xout = bx + threadIdx.y;
  const int yout = by + threadIdx.x;

  // Per-warp bit masks: bit j says whether row (y + j) / column (x + j) of this
  // thread's tile strip is inside tiledVol. Built once, reused every iteration.
  const unsigned int maskIny = __ballot_sync(FULL_MASK, (yin + warpLane < tiledVol.y))*(xin < tiledVol.x);
  const unsigned int maskOutx = __ballot_sync(FULL_MASK, (xout + warpLane < tiledVol.x))*(yout < tiledVol.y);

  const int posMinorIn = xin + yin*cuDimMk;
  const int posMinorOut = yout + xout*cuDimMm;
  const int posInAdd = TILEROWS*cuDimMk;
  const int posOutAdd = TILEROWS*cuDimMm;

  // Grid-stride loop over the Mbar (outer-rank) volume.
  for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
  {

    // Compute global memory positions. Each lane computes one rank's term;
    // the XOR-shuffle butterfly sums the terms across all 32 lanes.
    int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
    int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
    for (int i=16;i >= 1;i/=2) {
      posMajorIn += __shfl_xor_sync(FULL_MASK, posMajorIn, i);
      posMajorOut += __shfl_xor_sync(FULL_MASK, posMajorOut, i);
    }
    int posIn = posMajorIn + posMinorIn;
    int posOut = posMajorOut + posMinorOut;

    // Barrier before refilling the tile: the previous iteration's write-back
    // must be complete.
    __syncthreads();

    // Read data into shared memory tile.
    #pragma unroll
    for (int j=0;j < TILEDIM;j += TILEROWS) {
      if ((maskIny & (1 << j)) != 0) {
        shTile[threadIdx.y + j][threadIdx.x] = dataIn[posIn];
      }
      posIn += posInAdd;
    }

    // Write the transposed tile to global memory.
    __syncthreads();
    #pragma unroll
    for (int j=0;j < TILEDIM;j += TILEROWS) {
      if ((maskOutx & (1 << j)) != 0 ) {
        dataOut[posOut] = shTile[threadIdx.x][threadIdx.y + j];
      }
      posOut += posOutAdd;
    }

  }

}
//
// Packed transpose. Thread block loads plan.volMmk number of elements
//
// Packed transpose kernel: each thread block loads all volMmk elements of one
// Mmk "slab" into shared memory (each thread covering numRegStorage elements)
// and writes them back permuted. Requires lc.shmemsize = volMmk*sizeof(T)
// bytes of dynamic shared memory.
template <typename T, int numRegStorage>
__global__ void transposePacked(
  const int volMmk, const int volMbar,
  const int sizeMmk, const int sizeMbar,
  const TensorConvInOut* RESTRICT gl_Mmk,
  const TensorConvInOut* RESTRICT gl_Mbar,
  const TensorConv* RESTRICT gl_Msh,
  const T* RESTRICT dataIn, T* RESTRICT dataOut) {

  // Dynamic shared memory, volMmk elements of T.
  extern __shared__ char shBuffer_char[];
  T* shBuffer = (T *)shBuffer_char;

  const int warpLane = threadIdx.x & (warpSize - 1);
  // Lane i of every warp caches the conversion entry for Mmk rank i; lanes
  // beyond sizeMmk keep c=d=1 so their contribution below is zero.
  TensorConvInOut Mmk;
  Mmk.c_in = 1;
  Mmk.d_in = 1;
  Mmk.c_out = 1;
  Mmk.d_out = 1;
  if (warpLane < sizeMmk) {
    Mmk = gl_Mmk[warpLane];
  }
  TensorConv Msh;
  Msh.c = 1;
  Msh.d = 1;
  if (warpLane < sizeMmk) {
    Msh = gl_Msh[warpLane];
  }

  // Pre-compute tensor positions in Mmk: input offset, output offset and
  // shared-memory read index for each of this thread's numRegStorage elements.
  // Uses 3*numRegStorage registers.
  int posMmkIn[numRegStorage];
  int posMmkOut[numRegStorage];
  int posSh[numRegStorage];
#pragma unroll
  for (int j=0;j < numRegStorage;j++) {
    posMmkIn[j] = 0;
    posMmkOut[j] = 0;
    posSh[j] = 0;
  }
  // Rank i's conversion constants are broadcast from warp lane i via shuffle.
  for (int i=0;i < sizeMmk;i++) {
#pragma unroll
    for (int j=0;j < numRegStorage;j++) {
      int posMmk = threadIdx.x + j*blockDim.x;
      posMmkIn[j] += ((posMmk / __shfl_sync(FULL_MASK, Mmk.c_in,i)) % __shfl_sync(FULL_MASK, Mmk.d_in,i))*__shfl_sync(FULL_MASK, Mmk.ct_in,i);
      posMmkOut[j] += ((posMmk / __shfl_sync(FULL_MASK, Mmk.c_out,i)) % __shfl_sync(FULL_MASK, Mmk.d_out,i))*__shfl_sync(FULL_MASK, Mmk.ct_out,i);
      posSh[j] += ((posMmk / __shfl_sync(FULL_MASK, Msh.c,i)) % __shfl_sync(FULL_MASK, Msh.d,i))*__shfl_sync(FULL_MASK, Msh.ct,i);
    }
  }

  // Mbar conversion entry, one rank per warp lane. 6 registers.
  TensorConvInOut Mbar;
  Mbar.c_in = 1;
  Mbar.d_in = 1;
  Mbar.c_out = 1;
  Mbar.d_out = 1;
  if (warpLane < sizeMbar) {
    Mbar = gl_Mbar[warpLane];
  }

  // Grid-stride loop over the outer-rank (Mbar) volume.
  for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x)
  {

    // Per-lane rank terms summed across the warp with an XOR-shuffle butterfly.
    int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
    for (int i=16;i >= 1;i/=2) {
      posMbarOut += __shfl_xor_sync(FULL_MASK, posMbarOut, i);
    }

    int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
    for (int i=16;i >= 1;i/=2) {
      posMbarIn += __shfl_xor_sync(FULL_MASK, posMbarIn, i);
    }

    // Protect the previous iteration's shared-memory reads before overwriting.
    __syncthreads();

    // Read from global memory into shared memory.
#pragma unroll
    for (int j=0;j < numRegStorage;j++) {
      int posMmk = threadIdx.x + j*blockDim.x;
      int posIn = posMbarIn + posMmkIn[j];
      if (posMmk < volMmk) shBuffer[posMmk] = dataIn[posIn];
    }

    __syncthreads();

    // Write to global memory, gathering from shared memory in permuted order.
#pragma unroll
    for (int j=0;j < numRegStorage;j++) {
      int posMmk = threadIdx.x + j*blockDim.x;
      int posOut = posMbarOut + posMmkOut[j];
      if (posMmk < volMmk) dataOut[posOut] = shBuffer[posSh[j]];
    }

  }

}
//
// Packed method with a split rank
//
// dim nthread(((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1)
// dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1)
//
// Packed transpose with one rank split across blockIdx.x. Each x-block handles
// a contiguous slice of splitDim; the y-dimension of the grid partitions the
// Mbar volume. Requires lc.shmemsize = max(volSplit)*volMmkUnsplit*sizeof(T)
// bytes of dynamic shared memory.
//
// Expected launch configuration (see host code):
//   numthread = (ceil(volMmkWithSplit / (warpSize*numRegStorage))*warpSize, 1, 1)
//   numblock  = (ts.numSplit, min(256, max(1, ts.volMbar)), 1)
template <typename T, int numRegStorage>
__global__ void transposePackedSplit(
  const int splitDim, const int volMmkUnsplit, const int volMbar,
  const int sizeMmk, const int sizeMbar,
  const int cMmSplit, const int cMkSplit,
  const TensorConvInOut* RESTRICT glMmk,
  const TensorConvInOut* RESTRICT glMbar,
  const TensorConv* RESTRICT glMsh,
  const T* RESTRICT dataIn, T* RESTRICT dataOut) {

  // Dynamic shared memory: max(volSplit)*volMmkUnsplit elements of T.
  extern __shared__ char shBuffer_char[];
  T* shBuffer = (T *)shBuffer_char;

  const int warpLane = threadIdx.x & (warpSize - 1);

  // Slice of the split rank handled by this x-block:
  //   p0       = first index of the slice,
  //   volSplit = number of indices in the slice,
  //   plusone  = 1 for the blocks that carry one extra index (selects which of
  //              the two precomputed Mmk/Msh tables to use).
  const int p0 = blockIdx.x*splitDim/gridDim.x;
  const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0;
  const int plusone = volSplit - splitDim/gridDim.x;

  // Lane i caches the conversion entry of Mmk rank i; unused lanes keep
  // c=d=1 so their term below is zero.
  TensorConvInOut Mmk;
  Mmk.c_in = 1;
  Mmk.d_in = 1;
  Mmk.c_out = 1;
  Mmk.d_out = 1;
  if (warpLane < sizeMmk) {
    Mmk = glMmk[warpLane + plusone*sizeMmk];
  }
  TensorConv Msh;
  Msh.c = 1;
  Msh.d = 1;
  if (warpLane < sizeMmk) {
    Msh = glMsh[warpLane + plusone*sizeMmk];
  }

  // Base offsets of this slice in the input and output tensors.
  const int posMmkIn0 = p0*cMmSplit;
  const int posMmkOut0 = p0*cMkSplit;

  // Volume of split Mmk handled by this block.
  const int volMmkSplit = volSplit*volMmkUnsplit;

  // Pre-compute tensor positions in Mmk (input offset, output offset and
  // shared-memory read index per element). 3*numRegStorage registers.
  int posMmkIn[numRegStorage];
  int posMmkOut[numRegStorage];
  int posSh[numRegStorage];
#pragma unroll
  for (int j=0;j < numRegStorage;j++) {
    posMmkIn[j] = posMmkIn0;
    posMmkOut[j] = posMmkOut0;
    posSh[j] = 0;
  }
  // Rank i's constants are broadcast from warp lane i via shuffle.
  for (int i=0;i < sizeMmk;i++) {
#pragma unroll
    for (int j=0;j < numRegStorage;j++) {
      int t = threadIdx.x + j*blockDim.x;
      posMmkIn[j] += ((t/__shfl_sync(FULL_MASK, Mmk.c_in,i)) % __shfl_sync(FULL_MASK, Mmk.d_in,i))*__shfl_sync(FULL_MASK, Mmk.ct_in,i);
      posMmkOut[j] += ((t/__shfl_sync(FULL_MASK, Mmk.c_out,i)) % __shfl_sync(FULL_MASK, Mmk.d_out,i))*__shfl_sync(FULL_MASK, Mmk.ct_out,i);
      posSh[j] += ((t/__shfl_sync(FULL_MASK, Msh.c,i)) % __shfl_sync(FULL_MASK, Msh.d,i))*__shfl_sync(FULL_MASK, Msh.ct,i);
    }
  }

  TensorConvInOut Mbar;
  Mbar.c_in = 1;
  Mbar.d_in = 1;
  Mbar.c_out = 1;
  Mbar.d_out = 1;
  if (warpLane < sizeMbar) {
    Mbar = glMbar[warpLane];
  }

  // Mbar volume is partitioned contiguously across blockIdx.y.
  const int posMbar0 = blockIdx.y*volMbar/gridDim.y;
  const int posMbar1 = (blockIdx.y + 1)*volMbar/gridDim.y;
  for (int posMbar=posMbar0;posMbar < posMbar1;posMbar++)
  {

    // Per-lane rank terms summed across the warp with an XOR-shuffle butterfly.
    int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
    for (int i=16;i >= 1;i/=2) {
      posMbarOut += __shfl_xor_sync(FULL_MASK, posMbarOut, i);
    }

    int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
#pragma unroll
    for (int i=16;i >= 1;i/=2) {
      posMbarIn += __shfl_xor_sync(FULL_MASK, posMbarIn, i);
    }

    // Read from global memory into shared memory. The barrier protects the
    // previous iteration's shared-memory reads.
    __syncthreads();
#pragma unroll
    for (int j=0;j < numRegStorage;j++) {
      int posMmk = threadIdx.x + j*blockDim.x;
      int posIn = posMbarIn + posMmkIn[j];
      if (posMmk < volMmkSplit) shBuffer[posMmk] = dataIn[posIn];
    }

    // Write to global memory, gathering from shared memory in permuted order.
    __syncthreads();
#pragma unroll
    for (int j=0;j < numRegStorage;j++) {
      int posMmk = threadIdx.x + j*blockDim.x;
      int posOut = posMbarOut + posMmkOut[j];
      if (posMmk < volMmkSplit) dataOut[posOut] = shBuffer[posSh[j]];
    }

  }

}
//
// Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2)
//
// dim3 numthread(TILEDIM, TILEROWS, 1);
// dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar);
//
// Tiled copy kernel used when the lead dimension is unchanged, e.g.
// (1,2,3) -> (1,3,2): within a tile no element permutation is needed, so data
// is staged in registers (no shared memory) and copied strip by strip.
//
// Expected launch configuration (see comment block above / host code):
//   numthread = (TILEDIM, TILEROWS, 1)
//   numblock  = (tilesAlongMm * tilesAlongMkBar, 1, <=volMbar), grid-stride over z.
template <typename T>
__global__ void transposeTiledCopy(
  const int numMm, const int volMbar, const int sizeMbar,
  const int cuDimMk, const int cuDimMm,
  const int2 tiledVol,
  const TensorConvInOut* RESTRICT gl_Mbar,
  const T* RESTRICT dataIn, T* RESTRICT dataOut) {

  const int warpLane = threadIdx.x & (warpSize - 1);
  // Lane i caches the conversion entry of Mbar rank i; unused lanes keep
  // c=d=1 so their term below is zero.
  TensorConvInOut Mbar;
  Mbar.c_in = 1;
  Mbar.d_in = 1;
  Mbar.c_out = 1;
  Mbar.d_out = 1;
  if (warpLane < sizeMbar) {
    Mbar = gl_Mbar[warpLane];
  }

  // Tile origin: blockIdx.x enumerates tiles; numMm = number of tiles along Mm.
  const int bx = (blockIdx.x % numMm)*TILEDIM;
  const int by = (blockIdx.x / numMm)*TILEDIM;

  const int x = bx + threadIdx.x;
  const int y = by + threadIdx.y;

  // Per-warp bit mask: bit j says whether element (x, y + j) is inside tiledVol.
  const unsigned int mask = __ballot_sync(FULL_MASK, (y + warpLane < tiledVol.y))*(x < tiledVol.x);

  const int posMinorIn = x + y*cuDimMk;
  const int posMinorOut = x + y*cuDimMm;
  const int posInAdd = TILEROWS*cuDimMk;
  const int posOutAdd = TILEROWS*cuDimMm;

  // Grid-stride loop over the Mbar (outer-rank) volume.
  for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z)
  {

    // Compute global memory positions; per-lane rank terms are summed across
    // the warp with an XOR-shuffle butterfly.
    int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in;
    int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out;
#pragma unroll
    for (int i=16;i >= 1;i/=2) {
      posMajorIn += __shfl_xor_sync(FULL_MASK, posMajorIn, i);
      posMajorOut += __shfl_xor_sync(FULL_MASK, posMajorOut, i);
    }
    int posIn = posMajorIn + posMinorIn;
    int posOut = posMajorOut + posMinorOut;

    // Register staging for one TILEDIM-high strip.
    T val[TILEDIM/TILEROWS];

    // Read global memory.
#pragma unroll
    for (int j=0;j < TILEDIM;j += TILEROWS) {
      if ((mask & (1 << j)) != 0) {
        val[j/TILEROWS] = dataIn[posIn];
      }
      posIn += posInAdd;
    }

    // Write global memory.
#pragma unroll
    for (int j=0;j < TILEDIM;j += TILEROWS) {
      if ((mask & (1 << j)) != 0) {
        dataOut[posOut] = val[j/TILEROWS];
      }
      posOut += posOutAdd;
    }

  }

}
//######################################################################################
//######################################################################################
//######################################################################################
//
// Sets shared memory bank configuration for all kernels. Needs to be called once per device.
//
void cuttKernelSetSharedMemConfig() {
  // 4-byte banks for float kernels, 8-byte banks for double kernels.
  // "calls.h" expands CALL(NREG) once per supported numRegStorage value, so
  // every template instantiation gets configured.
#define CALL(NREG) CUDA_CALL(cudaFuncSetSharedMemConfig(transposePacked<float, NREG>, cudaSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL

#define CALL(NREG) CUDA_CALL(cudaFuncSetSharedMemConfig(transposePacked<double, NREG>, cudaSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL

#define CALL(NREG) CUDA_CALL(cudaFuncSetSharedMemConfig(transposePackedSplit<float, NREG>, cudaSharedMemBankSizeFourByte ))
#include "calls.h"
#undef CALL

#define CALL(NREG) CUDA_CALL(cudaFuncSetSharedMemConfig(transposePackedSplit<double, NREG>, cudaSharedMemBankSizeEightByte ))
#include "calls.h"
#undef CALL

  // Tiled kernels are not templated over numRegStorage; configure directly.
  CUDA_CALL(cudaFuncSetSharedMemConfig(transposeTiled<float>, cudaSharedMemBankSizeFourByte));
  CUDA_CALL(cudaFuncSetSharedMemConfig(transposeTiledCopy<float>, cudaSharedMemBankSizeFourByte));

  CUDA_CALL(cudaFuncSetSharedMemConfig(transposeTiled<double>, cudaSharedMemBankSizeEightByte));
  CUDA_CALL(cudaFuncSetSharedMemConfig(transposeTiledCopy<double>, cudaSharedMemBankSizeEightByte));
}
// Cache of occupancy results for PackedSplit kernels, shared by all devices
// (the device ID is folded into the cache key in getNumActiveBlock()).
// NOTE: Not thread safe.
const int CACHE_SIZE = 100000;     // max entries kept in the LRU cache
const int MAX_NUMWARP = (1024/32); // max warps per block (1024 threads / warp of 32)
const int MAX_NUMTYPE = 2;         // number of distinct element types keyed (4- vs 8-byte)
static int numDevices = -1;        // lazily initialised via cudaGetDeviceCount
LRUCache<unsigned long long int, int> nabCache(CACHE_SIZE, -1); // -1 == "not cached"
//
// Returns the maximum number of active blocks per SM
//
// Returns the maximum number of active blocks per SM for the kernel selected
// by (method, sizeofType, lc). PackedSplit results are memoised in nabCache
// because they are queried repeatedly during plan search.
// NOTE(review): the cudaOccupancyMaxActiveBlocksPerMultiprocessor return codes
// are not checked here, unlike other CUDA calls in this file — presumably an
// error would leave numActiveBlock at -1; confirm intended.
int getNumActiveBlock(const int method, const int sizeofType, const LaunchConfig& lc,
  const int deviceID, const cudaDeviceProp& prop) {

  int numActiveBlock = -1; // default init to silent warnings
  int numthread = lc.numthread.x * lc.numthread.y * lc.numthread.z;
  switch(method) {
    case Trivial:
    {
      // This value does not matter, but should be > 0
      numActiveBlock = 1;
    }
    break;

    case Packed:
    {
      // Dispatch on the runtime numRegStorage to the matching template
      // instantiation; "calls.h" expands CALL(ICASE) for each supported value.
#define CALL0(TYPE, NREG) \
      cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
        transposePacked<TYPE, NREG>, numthread, lc.shmemsize)
      switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 1) CALL0(char, ICASE); \
        if (sizeofType == 2) CALL0(short, ICASE); \
        if (sizeofType == 4) CALL0(float, ICASE); \
        if (sizeofType == 8) CALL0(double, ICASE); \
        break
#include "calls.h"
      }
#undef CALL
#undef CALL0
    }
    break;

    case PackedSplit:
    {
      // Allocate cache structure if needed
      if (numDevices == -1) {
        CUDA_CALL(cudaGetDeviceCount(&numDevices));
      }

      // Build unique key for cache from (shmem elements, device, type,
      // register storage, warp count).
      int key_warp = (numthread/prop.warpSize - 1);
      if (key_warp >= MAX_NUMWARP) {
        printf("getNumActiveBlock maximum number of warps exceeded\n");
        exit(1);
      }
      int key_reg = (lc.numRegStorage - 1);
      int key_type = (sizeofType == 4);
      unsigned long long int key = 
      (unsigned long long int)(lc.shmemsize/sizeofType)*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE*numDevices + 
      (unsigned long long int)deviceID*MAX_NUMWARP*MAX_REG_STORAGE*MAX_NUMTYPE + 
      (unsigned long long int)key_type*MAX_NUMWARP*MAX_REG_STORAGE + 
      (unsigned long long int)key_reg*MAX_NUMWARP + 
      (unsigned long long int)key_warp;

      numActiveBlock = nabCache.get(key);
      if (numActiveBlock == -1) {
        // key not found in cache, determine value and add it to cache
#define CALL0(TYPE, NREG) \
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, \
          transposePackedSplit<TYPE, NREG>, numthread, lc.shmemsize)
        switch(lc.numRegStorage) {
#define CALL(ICASE) case ICASE: if (sizeofType == 1) CALL0(char, ICASE); \
          if (sizeofType == 2) CALL0(short, ICASE); \
          if (sizeofType == 4) CALL0(float, ICASE); \
          if (sizeofType == 8) CALL0(double, ICASE); \
          break
#include "calls.h"
        }
#undef CALL
#undef CALL0
        nabCache.set(key, numActiveBlock);
      }
    }
    break;

    case Tiled:
    {
      if (sizeofType == 1) {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiled<char>, numthread, lc.shmemsize);
      } else if (sizeofType == 2) {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiled<short>, numthread, lc.shmemsize);
      } else if (sizeofType == 4) {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiled<float>, numthread, lc.shmemsize);
      } else {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiled<double>, numthread, lc.shmemsize);
      }
    }
    break;

    case TiledCopy:
    {
      if (sizeofType == 1) {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiledCopy<char>, numthread, lc.shmemsize);
      } else if (sizeofType == 2) {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiledCopy<short>, numthread, lc.shmemsize);
      } else if (sizeofType == 4) {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiledCopy<float>, numthread, lc.shmemsize);
      } else {
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock,
          transposeTiledCopy<double>, numthread, lc.shmemsize);
      }
    }
    break;
  }

  return numActiveBlock;
}
//
// Sets up kernel launch configuration
//
// Returns the number of active blocks per SM that can be achieved on the Packed kernel
// NOTE: Returns 0 when kernel execution is not possible
//
// Sets:
// lc.numthread
// lc.numblock
// lc.shmemsize
// lc.numRegStorage (for Packed method)
//
// Sets up the kernel launch configuration for the method chosen in ts.
//
// Sets: lc.numthread, lc.numblock, lc.shmemsize, lc.numRegStorage (Packed methods).
// Returns the number of active blocks per SM achievable with these settings,
// or 0 when kernel execution is not possible (too much shared memory, too
// many threads, or the grid exceeds device limits).
//
// Fix over previous revision: the Trivial case assigned lc.numblock.z = 1
// three times; the redundant assignments are removed (no behavior change).
int cuttKernelLaunchConfiguration(const int sizeofType, const TensorSplit& ts,
  const int deviceID, const cudaDeviceProp& prop, LaunchConfig& lc) {

  // Return value of numActiveBlock
  int numActiveBlockReturn = -1;

  switch(ts.method) {
    case Trivial:
    {
      // These values don't matter
      lc.numthread.x = 1;
      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = 1;
      lc.numblock.y = 1;
      lc.numblock.z = 1;
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;

    case Packed:
    {
      // Amount of shared memory required
      lc.shmemsize = ts.shmemAlloc(sizeofType); //ts.volMmk*sizeofType;

      // Check that we're not using too much shared memory per block
      if (lc.shmemsize > prop.sharedMemPerBlock) {
        return 0;
      }

      // Min and max number of threads we can use
      int minNumthread = ((ts.volMmk - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
      int maxNumthread = ((ts.volMmk - 1)/(prop.warpSize) + 1)*prop.warpSize;
      if (minNumthread > prop.maxThreadsPerBlock) return 0;
      maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);

      // Min and max number of register storage we can use
      int minNumRegStorage = (ts.volMmk - 1)/maxNumthread + 1;
      int maxNumRegStorage = (ts.volMmk - 1)/minNumthread + 1;

      // Search the numRegStorage value that maximises occupied Mmk volume
      // (used volume times active blocks per SM).
      int bestVal = 0;
      int bestNumRegStorage = 0;
      int bestNumActiveBlock = 0;

      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = max(1, ts.volMbar);
      lc.numblock.x = min(prop.multiProcessorCount*18, lc.numblock.x);
      lc.numblock.y = 1;
      lc.numblock.z = 1;

      for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
        lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;

        int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
        int val = ts.volMmkUsed()*numActiveBlock;
        if (val > bestVal) {
          bestVal = val;
          bestNumRegStorage = lc.numRegStorage;
          bestNumActiveBlock = numActiveBlock;
        }
      }

      if (bestNumRegStorage == 0) return 0;

      lc.numRegStorage = bestNumRegStorage;
      lc.numthread.x = ((ts.volMmk - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
      numActiveBlockReturn = bestNumActiveBlock;
    }
    break;

    case PackedSplit:
    {
      // Amount of shared memory required
      lc.shmemsize = ts.shmemAlloc(sizeofType);

      // Check that we're not using too much shared memory per block
      if (lc.shmemsize > prop.sharedMemPerBlock) {
        return 0;
      }

      // Largest split volume a block may have to handle (ceil of splitDim/numSplit).
      int volMmkWithSplit = (ts.splitDim/ts.numSplit + ((ts.splitDim % ts.numSplit) > 0))*ts.volMmkUnsplit;

      // Min and max number of threads we can use
      int minNumthread = ((volMmkWithSplit - 1)/(prop.warpSize*MAX_REG_STORAGE) + 1)*prop.warpSize;
      int maxNumthread = ((volMmkWithSplit - 1)/(prop.warpSize) + 1)*prop.warpSize;
      if (minNumthread > prop.maxThreadsPerBlock) return 0;
      maxNumthread = min(prop.maxThreadsPerBlock, maxNumthread);

      // Min and max number of register storage we can use
      int minNumRegStorage = (volMmkWithSplit - 1)/maxNumthread + 1;
      int maxNumRegStorage = (volMmkWithSplit - 1)/minNumthread + 1;

      // Search the numRegStorage value that maximises occupied Mmk volume.
      int bestVal = 0;
      int bestNumRegStorage = 0;
      int bestNumActiveBlock = 0;

      lc.numthread.y = 1;
      lc.numthread.z = 1;
      lc.numblock.x = ts.numSplit;
      lc.numblock.y = max(1, min((prop.multiProcessorCount*18)/lc.numblock.x, ts.volMbar));
      lc.numblock.z = 1;

      for (lc.numRegStorage=minNumRegStorage;lc.numRegStorage <= maxNumRegStorage;lc.numRegStorage++) {
        lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;

        int numActiveBlock = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
        int val = ts.volMmkUsed()*numActiveBlock;
        if (val > bestVal) {
          bestVal = val;
          bestNumRegStorage = lc.numRegStorage;
          bestNumActiveBlock = numActiveBlock;
        }
      }

      if (bestNumRegStorage == 0) return 0;

      lc.numRegStorage = bestNumRegStorage;
      lc.numthread.x = ((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize;
      numActiveBlockReturn = bestNumActiveBlock;
    }
    break;

    case Tiled:
    {
      lc.numthread.x = TILEDIM;
      lc.numthread.y = TILEROWS;
      lc.numthread.z = 1;
      lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMk - 1)/TILEDIM + 1);
      lc.numblock.y = 1;
      // Cap the z-grid; the kernel grid-strides over volMbar.
      lc.numblock.z = max(1, min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), ts.volMbar));
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;

    case TiledCopy:
    {
      lc.numthread.x = TILEDIM;
      lc.numthread.y = TILEROWS;
      lc.numthread.z = 1;
      lc.numblock.x = ((ts.volMm - 1)/TILEDIM + 1)*((ts.volMkBar - 1)/TILEDIM + 1);
      lc.numblock.y = 1;
      // Cap the z-grid; the kernel grid-strides over volMbar.
      lc.numblock.z = ts.volMbar;
      lc.numblock.z = min((prop.multiProcessorCount*8)/(lc.numblock.x*lc.numblock.y), lc.numblock.z);
      lc.numblock.z = max(1, lc.numblock.z);
      lc.shmemsize = 0;
      lc.numRegStorage = 0;
    }
    break;
  }

  // Reject configurations whose grid exceeds the device limits.
  if (lc.numblock.x > static_cast<unsigned int>(prop.maxGridSize[0]) ||
      lc.numblock.y > static_cast<unsigned int>(prop.maxGridSize[1]) ||
      lc.numblock.z > static_cast<unsigned int>(prop.maxGridSize[2])) return 0;

  // Return the number of active blocks with these settings
  if (numActiveBlockReturn == -1) {
    // Not set, get it
    numActiveBlockReturn = getNumActiveBlock(ts.method, sizeofType, lc, deviceID, prop);
  }

  return numActiveBlockReturn;
}
// Executes the transpose described by plan on plan.stream, reading dataIn and
// writing dataOut (device pointers). Returns false only when no kernel
// template exists for the requested numRegStorage; CUDA launch errors are
// surfaced through CUDA_CALL(cudaGetLastError()).
bool cuttKernel(cuttPlan_t& plan, const void* dataIn, void* dataOut) {

  LaunchConfig& lc = plan.launchConfig;
  TensorSplit& ts = plan.tensorSplit;

  switch(ts.method) {
    case Trivial:
    {
      // Identity permutation: plain device-to-device copy.
      CUDA_CALL(cudaMemcpyAsync(dataOut, dataIn, ts.volMmk*ts.volMbar*plan.sizeofType,
        cudaMemcpyDeviceToDevice, plan.stream));
    }
    break;

    case Packed:
    {
      // Dispatch on numRegStorage to the matching template instantiation;
      // "calls.h" expands CALL(ICASE) for each supported value.
      switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
      transposePacked<TYPE, NREG> <<< lc.numblock, lc.numthread, lc.shmemsize, plan.stream >>> \
        (ts.volMmk, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \
          plan.Mmk, plan.Mbar, plan.Msh, (const TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 1) CALL0(char, ICASE); \
        if (plan.sizeofType == 2) CALL0(short, ICASE); \
        if (plan.sizeofType == 4) CALL0(float, ICASE); \
        if (plan.sizeofType == 8) CALL0(double, ICASE); \
        break
#include "calls.h"
        default:
        printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
        return false;
#undef CALL
#undef CALL0
      }

    }
    break;

    case PackedSplit:
    {
      // Dispatch on numRegStorage, as in the Packed case.
      switch(lc.numRegStorage) {
#define CALL0(TYPE, NREG) \
      transposePackedSplit<TYPE, NREG> <<< lc.numblock, lc.numthread, lc.shmemsize, plan.stream >>> \
        (ts.splitDim, ts.volMmkUnsplit, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \
          plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, plan.Msh, (const TYPE *)dataIn, (TYPE *)dataOut)
#define CALL(ICASE) case ICASE: if (plan.sizeofType == 1) CALL0(char, ICASE); \
        if (plan.sizeofType == 2) CALL0(short, ICASE); \
        if (plan.sizeofType == 4) CALL0(float, ICASE); \
        if (plan.sizeofType == 8) CALL0(double, ICASE); \
        break
#include "calls.h"
        default:
        printf("cuttKernel no template implemented for numRegStorage %d\n", lc.numRegStorage);
        return false;
#undef CALL
#undef CALL0
      }

    }
    break;

    case Tiled:
    {
      // First argument is the number of tiles along Mm.
#define CALL(TYPE) \
      transposeTiled<TYPE> <<< lc.numblock, lc.numthread, 0, plan.stream >>> \
      (((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, \
        plan.Mbar, (const TYPE *)dataIn, (TYPE *)dataOut)
      if (plan.sizeofType == 1) CALL(char);
      if (plan.sizeofType == 2) CALL(short);
      if (plan.sizeofType == 4) CALL(float);
      if (plan.sizeofType == 8) CALL(double);
#undef CALL
    }
    break;

    case TiledCopy:
    {
      // NOTE: argument order differs from Tiled — cuDimMk/cuDimMm precede
      // tiledVol, matching transposeTiledCopy's signature.
#define CALL(TYPE) \
      transposeTiledCopy<TYPE> <<< lc.numblock, lc.numthread, 0, plan.stream >>> \
      (((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, \
        plan.Mbar, (const TYPE *)dataIn, (TYPE *)dataOut)
      if (plan.sizeofType == 1) CALL(char);
      if (plan.sizeofType == 2) CALL(short);
      if (plan.sizeofType == 4) CALL(float);
      if (plan.sizeofType == 8) CALL(double);
#undef CALL
    }
    break;

  }

  // Surface any launch-configuration error from the kernel launches above.
  CUDA_CALL(cudaGetLastError());
  return true;
}
|
52cf005b30ed7737d1bf290260b14447ef5c10e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include"_reg_resampling.h"
#include"_reg_maths.h"
#include "resampleKernel.h"
#include "_reg_common_cuda.h"
#include"_reg_tools.h"
#include"_reg_ReadWriteImage.h"
#define SINC_KERNEL_RADIUS 3
#define SINC_KERNEL_SIZE SINC_KERNEL_RADIUS*2
/* *************************************************************** */
// Host-side helper: returns the smaller of two unsigned integers.
unsigned int min1(unsigned int a, unsigned int b)
{
   return (b < a) ? b : a;
}
/* *************************************************************** */
// 4x4 matrix in GPU constant memory — presumably an identity transform
// initialised from the host via a memcpy-to-symbol; confirm against callers.
__device__ __constant__ float cIdentity[16];
// Matrix exponential of a 4x4 matrix, in place. Unimplemented stub: calling
// this is currently a no-op.
__device__ __inline__ void reg_mat44_expm_cuda(float* mat)
{
   //todo
}
// Matrix logarithm of a 4x4 matrix, in place. Unimplemented stub: calling
// this is currently a no-op.
__device__ __inline__
void reg_mat44_logm_cuda(float* mat)
{
   //todo
}
/* *************************************************************** */
/* *************************************************************** */
// Applies the top three rows of the 4x4 affine matrix `mat` to the 3D point
// `in` and stores the transformed point in `out` (homogeneous coordinate
// implicitly 1). All products are accumulated in double precision.
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(DTYPE const* mat, DTYPE const* in, DTYPE *out)
{
   for (int row = 0; row < 3; ++row) {
      out[row] = (DTYPE)((double)mat[row * 4 + 0] * (double)in[0] +
                         (double)mat[row * 4 + 1] * (double)in[1] +
                         (double)mat[row * 4 + 2] * (double)in[2] +
                         (double)mat[row * 4 + 3]);
   }
   return;
}
/* *************************************************************** */
/* *************************************************************** */
// Overload taking a float matrix: applies the top three rows of the 4x4
// affine matrix to the 3D point `in`, writing the result to `out`.
// All products are accumulated in double precision.
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
   for (int row = 0; row < 3; ++row) {
      out[row] = (DTYPE)((double)mat[row * 4 + 0] * (double)in[0] +
                         (double)mat[row * 4 + 1] * (double)in[1] +
                         (double)mat[row * 4 + 2] * (double)in[2] +
                         (double)mat[row * 4 + 3]);
   }
   return;
}
/* *************************************************************** */
// Rounds a double towards negative infinity and returns the result as an int.
__device__ __inline__ int cuda_reg_floor(double a)
{
   return static_cast<int>(floor(a));
}
/* *************************************************************** */
// Computes the four cubic-spline interpolation weights for a fractional
// position `ratio` in [0,1), writing them to basis[0..3]. Intermediate
// arithmetic is carried out in double precision.
template<class FieldTYPE>
__device__ __inline__ void interpolantCubicSpline(FieldTYPE ratio, FieldTYPE *basis)
{
   // Clamp tiny negative values introduced by rounding error.
   if (ratio < 0.0)
      ratio = 0.0;
   const double FF = (double) ratio * ratio;
   basis[3] = (FieldTYPE) ((ratio - (double)1.0) * FF / (double)2.0);
   basis[2] = (FieldTYPE) ((ratio * (((double)4.0 - (double)3.0 * ratio) * ratio + (double)1.0)) / (double)2.0);
   basis[1] = (FieldTYPE) ((FF * ((double)3.0 * ratio - 5.0) + 2.0) / (double)2.0);
   basis[0] = (FieldTYPE) ((ratio * (((double)2.0 - ratio) * ratio - (double)1.0)) / (double)2.0);
}
/* *************************************************************** */
/* *************************************************************** */
// Fills the given 4x4 matrix (row-major, 16 floats) with the identity:
// ones on the diagonal, zeros elsewhere.
__device__ __inline__
void reg_mat44_eye(float *mat) {
   for (int r = 0; r < 4; ++r) {
      for (int c = 0; c < 4; ++c) {
         mat[r * 4 + c] = (r == c) ? 1.f : 0.f;
      }
   }
}
/* *************************************************************** */
// Computes the SINC_KERNEL_SIZE (= 2*SINC_KERNEL_RADIUS) windowed-sinc
// interpolation weights for a fractional position `relative`, writing them to
// basis[0..SINC_KERNEL_SIZE-1]. The weights are normalised to sum to one.
//
// Fix: use fabs() instead of abs() on the double offset — depending on which
// headers are in effect, abs() may bind to the integer overload and silently
// truncate the argument, corrupting the cut-off test.
__inline__ __device__ void interpWindowedSincKernel(double relative, double *basis)
{
   if (relative < 0.0)
      relative = 0.0; //reg_rounding error
   int j = 0;
   double sum = 0.;
   // Taps run over the integer offsets -RADIUS .. RADIUS-1 around `relative`.
   for (int i = -SINC_KERNEL_RADIUS; i < SINC_KERNEL_RADIUS; ++i) {
      double x = relative - (double) (i);
      if (x == 0.0)
         basis[j] = 1.0;
      else if (fabs(x) >= (double) (SINC_KERNEL_RADIUS))
         basis[j] = 0.0;
      else {
         // Lanczos window: sinc(x) * sinc(x / RADIUS).
         double pi_x = M_PI * x;
         basis[j] = (SINC_KERNEL_RADIUS) * sin(pi_x) * sin(pi_x / SINC_KERNEL_RADIUS) / (pi_x * pi_x);
      }
      sum += basis[j];
      j++;
   }
   // Normalise the weights so they sum to one.
   for (int i = 0; i < SINC_KERNEL_SIZE; ++i)
      basis[i] /= sum;
}
/* *************************************************************** */
/* *************************************************************** */
// Computes the four cubic-spline interpolation weights for a fractional
// position `relative` in [0,1), writing them to basis[0..3].
__inline__ __device__ void interpCubicSplineKernel(double relative, double *basis)
{
   // Clamp tiny negative values introduced by rounding error.
   if (relative < 0.0)
      relative = 0.0;
   const double FF = relative * relative;
   basis[3] = (relative - 1.0) * FF / 2.0;
   basis[2] = (relative * ((4.0 - 3.0 * relative) * relative + 1.0)) / 2.0;
   basis[1] = (FF * (3.0 * relative - 5.0) + 2.0) / 2.0;
   basis[0] = (relative * ((2.0 - relative) * relative - 1.0)) / 2.0;
}
/* *************************************************************** */
/* *************************************************************** */
// Computes the two linear interpolation weights for a fractional position
// `relative` in [0,1): basis[0] weights the lower neighbour, basis[1] the upper.
__inline__ __device__ void interpLinearKernel(double relative, double *basis)
{
   // Clamp tiny negative values introduced by rounding error.
   if (relative < 0.0)
      relative = 0.0;
   basis[0] = 1.0 - relative;
   basis[1] = relative;
}
/* *************************************************************** */
/* *************************************************************** */
// Nearest-neighbour "interpolation" weights: selects the upper neighbour when
// the fractional position is >= 0.5, otherwise the lower one.
__inline__ __device__ void interpNearestNeighKernel(double relative, double *basis)
{
   // Clamp tiny negative values introduced by rounding error.
   if (relative < 0.0)
      relative = 0.0;
   const bool pickUpper = (relative >= 0.5);
   basis[0] = pickUpper ? 0.0 : 1.0;
   basis[1] = pickUpper ? 1.0 : 0.0;
}
/* *************************************************************** */
/* *************************************************************** */
// Weighted 2D gather: accumulates a kernel_size x kernel_size neighbourhood
// starting at previous[0..1], weighting columns by xBasis and rows by yBasis.
// Out-of-bounds samples contribute paddingValue instead. zBasis is accepted
// for signature symmetry with interpLoop3D but is unused in 2D.
__inline__ __device__ double interpLoop2D(float* floatingIntensity,
                                          double* xBasis,
                                          double* yBasis,
                                          double* zBasis,
                                          int *previous,
                                          uint3 fi_xyz,
                                          float paddingValue,
                                          unsigned int kernel_size)
{
   double intensity = 0.0;
   for (int row = 0; row < kernel_size; row++) {
      const int vy = previous[1] + row;
      const bool insideY = (-1 < vy) && (vy < fi_xyz.y);
      double rowSum = 0.0;
      for (int col = 0; col < kernel_size; col++) {
         const int vx = previous[0] + col;
         const bool insideX = (-1 < vx) && (vx < fi_xyz.x);
         const unsigned int idx = vy * fi_xyz.x + vx;
         rowSum += (insideX && insideY) ? floatingIntensity[idx] * xBasis[col]
                                        : paddingValue * xBasis[col];
      }
      intensity += rowSum * yBasis[row];
   }
   return intensity;
}
/* *************************************************************** */
/* *************************************************************** */
// Weighted 3D gather: accumulates a kernel_size^3 neighbourhood starting at
// previous[0..2], weighting by xBasis/yBasis/zBasis along x/y/z. Out-of-bounds
// samples contribute paddingValue instead.
__inline__ __device__ double interpLoop3D(float* floatingIntensity,
                                          double* xBasis,
                                          double* yBasis,
                                          double* zBasis,
                                          int *previous,
                                          uint3 fi_xyz,
                                          float paddingValue,
                                          unsigned int kernel_size)
{
   double intensity = 0.0;
   for (int slice = 0; slice < kernel_size; slice++) {
      const int vz = previous[2] + slice;
      const bool insideZ = (-1 < vz) && (vz < fi_xyz.z);
      double sliceSum = 0.0;
      for (int row = 0; row < kernel_size; row++) {
         const int vy = previous[1] + row;
         const bool insideY = (-1 < vy) && (vy < fi_xyz.y);
         double rowSum = 0.0;
         for (int col = 0; col < kernel_size; col++) {
            const int vx = previous[0] + col;
            const bool insideX = (-1 < vx) && (vx < fi_xyz.x);
            const unsigned int idx = vz * fi_xyz.x * fi_xyz.y + vy * fi_xyz.x + vx;
            rowSum += (insideX && insideY && insideZ) ? floatingIntensity[idx] * xBasis[col]
                                                      : paddingValue * xBasis[col];
         }
         sliceSum += rowSum * yBasis[row];
      }
      intensity += sliceSum * zBasis[slice];
   }
   return intensity;
}
/* *************************************************************** */
/* Resample a (possibly multi-component) 2D image along a deformation field.
 * Grid-stride loop over warped voxels: one thread handles several voxels, so
 * any launch configuration covers the whole image.
 * voxelNumber.x = warped voxel count, voxelNumber.y = floating voxel count
 * (see launchResample).  kernelType selects the interpolant:
 * 0 nearest-neighbour, 1 linear, 4 windowed sinc, anything else cubic spline. */
__global__ void ResampleImage2D(float* floatingImage,
                                float* deformationField,
                                float* warpedImage,
                                int *mask,
                                float* sourceIJKMatrix,
                                ulong2 voxelNumber,
                                uint3 fi_xyz,
                                uint2 wi_tu,
                                float paddingValue,
                                int kernelType)
{
    float *sourceIntensityPtr = (floatingImage);
    float *resultIntensityPtr = (warpedImage);
    // Deformation field is structure-of-arrays: all x components, then all y.
    float *deformationFieldPtrX = (deformationField);
    float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
    int *maskPtr = &mask[0];
    long index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < voxelNumber.x) {  // grid-stride loop over warped voxels
        for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {  // nt * nu components
            float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
            float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
            double intensity = paddingValue;
            if (maskPtr[index] > -1) {  // voxel lies inside the region of interest
                int previous[3];
                float world[3], position[3];
                double relative[3];
                world[0] = (float)(deformationFieldPtrX[index]);
                world[1] = (float)(deformationFieldPtrY[index]);
                world[2] = 0.0f;
                // real -> voxel; floating space
                reg_mat44_mul_cuda<float>(sourceIJKMatrix, world, position);
                previous[0] = cuda_reg_floor(position[0]);
                previous[1] = cuda_reg_floor(position[1]);
                // Fractional offset within the voxel, used to build basis weights.
                relative[0] = (double)(position[0]) - (double)(previous[0]);
                relative[1] = (double)(position[1]) - (double)(previous[1]);
                if (kernelType == 0) {
                    // nearest neighbour: 2-tap basis with all weight on one node
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpNearestNeighKernel(relative[0], xBasisIn);
                    interpNearestNeighKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                }
                else if (kernelType == 1) {
                    // linear interpolation
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpLinearKernel(relative[0], xBasisIn);
                    interpLinearKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                }
                else if (kernelType == 4) {
                    // 6-tap windowed sinc: shift the anchor back by the kernel radius
                    double xBasisIn[6], yBasisIn[6], zBasisIn[6];
                    previous[0] -= SINC_KERNEL_RADIUS;
                    previous[1] -= SINC_KERNEL_RADIUS;
                    // NOTE(review): previous[2] is never initialised in this 2D
                    // kernel, so this modifies an indeterminate value.  It looks
                    // harmless because interpLoop2D ignores previous[2] — confirm.
                    previous[2] -= SINC_KERNEL_RADIUS;
                    interpWindowedSincKernel(relative[0], xBasisIn);
                    interpWindowedSincKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 6);
                }
                else {
                    // default: 4-tap cubic spline, anchor one voxel back
                    double xBasisIn[4], yBasisIn[4], zBasisIn[4];
                    previous[0]--;
                    previous[1]--;
                    previous[2]--;  // NOTE(review): same uninitialised-value remark as above
                    interpCubicSplineKernel(relative[0], xBasisIn);
                    interpCubicSplineKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 4);
                }
            }
            // Masked-out voxels keep paddingValue.
            resultIntensity[index] = (float)intensity;
        }
        index += blockDim.x * gridDim.x;
    }
}
/* *************************************************************** */
/* Resample a (possibly multi-component) 3D image along a deformation field.
 * Grid-stride loop over warped voxels.  voxelNumber.x = warped voxel count,
 * voxelNumber.y = floating voxel count (see launchResample).
 * kernelType: 0 nearest-neighbour, 1 linear, 4 windowed sinc, else cubic spline. */
__global__ void ResampleImage3D(float* floatingImage,
                                float* deformationField,
                                float* warpedImage,
                                int *mask,
                                float* sourceIJKMatrix,
                                ulong2 voxelNumber,
                                uint3 fi_xyz,
                                uint2 wi_tu,
                                float paddingValue,
                                int kernelType)
{
    float *sourceIntensityPtr = (floatingImage);
    float *resultIntensityPtr = (warpedImage);
    // Structure-of-arrays deformation field: x plane, then y plane, then z plane.
    float *deformationFieldPtrX = (deformationField);
    float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
    float *deformationFieldPtrZ = &deformationFieldPtrY[voxelNumber.x];
    int *maskPtr = &mask[0];
    long index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < voxelNumber.x) {  // grid-stride loop over warped voxels
        for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {  // nt * nu components
            float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
            float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
            double intensity = paddingValue;
            if (maskPtr[index] > -1) {  // voxel lies inside the region of interest
                int previous[3];
                float world[3], position[3];
                double relative[3];
                world[0] = (float) (deformationFieldPtrX[index]);
                world[1] = (float) (deformationFieldPtrY[index]);
                world[2] = (float) (deformationFieldPtrZ[index]);
                // real -> voxel; floating space
                reg_mat44_mul_cuda<float>(sourceIJKMatrix, world, position);
                previous[0] = cuda_reg_floor(position[0]);
                previous[1] = cuda_reg_floor(position[1]);
                previous[2] = cuda_reg_floor(position[2]);
                // Fractional offset within the voxel, used to build basis weights.
                relative[0] = (double)(position[0]) - (double)(previous[0]);
                relative[1] = (double)(position[1]) - (double)(previous[1]);
                relative[2] = (double)(position[2]) - (double)(previous[2]);
                if (kernelType == 0) {
                    // nearest neighbour
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpNearestNeighKernel(relative[0], xBasisIn);
                    interpNearestNeighKernel(relative[1], yBasisIn);
                    interpNearestNeighKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                } else if (kernelType == 1) {
                    // linear interpolation
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpLinearKernel(relative[0], xBasisIn);
                    interpLinearKernel(relative[1], yBasisIn);
                    interpLinearKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                } else if (kernelType == 4) {
                    // 6-tap windowed sinc: shift the anchor back by the kernel radius
                    double xBasisIn[6], yBasisIn[6], zBasisIn[6];
                    previous[0] -= SINC_KERNEL_RADIUS;
                    previous[1] -= SINC_KERNEL_RADIUS;
                    previous[2] -= SINC_KERNEL_RADIUS;
                    interpWindowedSincKernel(relative[0], xBasisIn);
                    interpWindowedSincKernel(relative[1], yBasisIn);
                    interpWindowedSincKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 6);
                } else {
                    // default: 4-tap cubic spline, anchor one voxel back
                    double xBasisIn[4], yBasisIn[4], zBasisIn[4];
                    previous[0]--;
                    previous[1]--;
                    previous[2]--;
                    interpCubicSplineKernel(relative[0], xBasisIn);
                    interpCubicSplineKernel(relative[1], yBasisIn);
                    interpCubicSplineKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 4);
                }
            }
            resultIntensity[index] = (float)intensity;  // masked voxels keep paddingValue
        }
        index += blockDim.x * gridDim.x;
    }
}
/* *************************************************************** */
/* Host-side entry point (HIP build): configures and launches the 2D or 3D
 * resampling kernel on the already-uploaded device buffers.
 * - interp selects the interpolant (0 NN, 1 linear, 4 windowed sinc, else cubic);
 * - paddingValue fills voxels that map outside the floating image;
 * - DTI-aware resampling (dti_timepoint / jacMat) is not implemented on GPU
 *   and aborts via reg_exit().
 * All *_d arguments are device pointers passed by address. */
void launchResample(nifti_image *floatingImage,
                    nifti_image *warpedImage,
                    int interp,
                    float paddingValue,
                    bool *dti_timepoint,
                    mat33 *jacMat,
                    float **floatingImage_d,
                    float **warpedImage_d,
                    float **deformationFieldImage_d,
                    int **mask_d,
                    float **sourceIJKMatrix_d) {
    // DTI resampling is not supported on the GPU path.
    if (dti_timepoint != NULL || jacMat != NULL) {
        reg_print_fct_error("launchResample");
        reg_print_msg_error("The DTI resampling has not yet been implemented with the CUDA platform. Exit.");
        reg_exit();
    }
    long targetVoxelNumber = (long) warpedImage->nx * warpedImage->ny * warpedImage->nz;
    unsigned int maxThreads = 512;
    // Upper bound on the 1D grid size (65535 on legacy hardware).  The kernels
    // iterate with a grid-stride loop, so capping the block count never loses
    // voxels.  The previous value, 65365, was a typo for 65535.
    unsigned int maxBlocks = 65535;
    // Ceiling division of the voxel count by the block size.
    unsigned int blocks = (targetVoxelNumber % maxThreads) ? (targetVoxelNumber / maxThreads) + 1 : targetVoxelNumber / maxThreads;
    blocks = min1(blocks, maxBlocks);
    dim3 mygrid(blocks, 1, 1);
    dim3 myblocks(maxThreads, 1, 1);
    // (An unused hipGetDeviceProperties query was removed here.)
    // x: warped-image voxel count, y: floating-image voxel count.
    ulong2 voxelNumber = make_ulong2(warpedImage->nx * warpedImage->ny * warpedImage->nz, floatingImage->nx * floatingImage->ny * floatingImage->nz);
    uint3 fi_xyz = make_uint3(floatingImage->nx, floatingImage->ny, floatingImage->nz);
    uint2 wi_tu = make_uint2(warpedImage->nt, warpedImage->nu);
    if (floatingImage->nz > 1) {
        hipLaunchKernelGGL((ResampleImage3D), dim3(mygrid), dim3(myblocks), 0, 0, *floatingImage_d,
                           *deformationFieldImage_d,
                           *warpedImage_d,
                           *mask_d,
                           *sourceIJKMatrix_d,
                           voxelNumber,
                           fi_xyz,
                           wi_tu,
                           paddingValue,
                           interp);
    }
    else {
        hipLaunchKernelGGL((ResampleImage2D), dim3(mygrid), dim3(myblocks), 0, 0, *floatingImage_d,
                           *deformationFieldImage_d,
                           *warpedImage_d,
                           *mask_d,
                           *sourceIJKMatrix_d,
                           voxelNumber,
                           fi_xyz,
                           wi_tu,
                           paddingValue,
                           interp);
    }
#ifndef NDEBUG
    NR_CUDA_CHECK_KERNEL(mygrid, myblocks)
#else
    NR_CUDA_SAFE_CALL(hipDeviceSynchronize());
#endif
}
/* *************************************************************** */
/* Upload a 4x4 identity matrix to the constant-memory symbol cIdentity. */
void identityConst()
{
    float* mat_h = (float*) malloc(16 * sizeof(float));
    mat44 *final = new mat44();
    // Set the current transformation to identity
    final->m[0][0] = final->m[1][1] = final->m[2][2] = final->m[3][3] = 1.0f;
    final->m[0][1] = final->m[0][2] = final->m[0][3] = 0.0f;
    final->m[1][0] = final->m[1][2] = final->m[1][3] = 0.0f;
    final->m[2][0] = final->m[2][1] = final->m[2][3] = 0.0f;
    final->m[3][0] = final->m[3][1] = final->m[3][2] = 0.0f;
    mat44ToCptr(*final, mat_h);
    // Copy the 16 floats themselves.  The previous code passed &mat_h (the
    // address of the host pointer), which uploaded the pointer value plus
    // adjacent stack bytes instead of the matrix.
    hipMemcpyToSymbol(cIdentity, mat_h, 16 * sizeof(float));
    // Release host-side temporaries (previously leaked).
    free(mat_h);
    delete final;
}
/* *************************************************************** */
| 52cf005b30ed7737d1bf290260b14447ef5c10e7.cu | #include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "cuda.h"
#include"_reg_resampling.h"
#include"_reg_maths.h"
#include "resampleKernel.h"
#include "_reg_common_cuda.h"
#include"_reg_tools.h"
#include"_reg_ReadWriteImage.h"
#define SINC_KERNEL_RADIUS 3
#define SINC_KERNEL_SIZE SINC_KERNEL_RADIUS*2
/* *************************************************************** */
/* Return the smaller of two unsigned integers. */
unsigned int min1(unsigned int a, unsigned int b)
{
    if (a < b)
        return a;
    return b;
}
/* *************************************************************** */
__device__ __constant__ float cIdentity[16];
/* Matrix exponential of a 4x4 matrix — placeholder, not implemented yet.
 * Currently a no-op: `mat` is left untouched. */
__device__ __inline__ void reg_mat44_expm_cuda(float* mat)
{
    //todo
}
/* Matrix logarithm of a 4x4 matrix — placeholder, not implemented yet.
 * Currently a no-op: `mat` is left untouched. */
__device__ __inline__
void reg_mat44_logm_cuda(float* mat)
{
    //todo
}
/* *************************************************************** */
/* out = mat * [in, 1]: apply a row-major 4x4 affine matrix (flat array of 16
 * elements) to the 3-vector `in`, including the translation column; the
 * bottom matrix row is ignored.  Products are accumulated in double before
 * being cast back to DTYPE to limit rounding error. */
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(DTYPE const* mat, DTYPE const* in, DTYPE *out)
{
    out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]);
    out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]);
    out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]);
    return;
}
/* *************************************************************** */
/* Overload of the affine transform above for an always-float matrix with a
 * DTYPE vector: out = mat * [in, 1] (translation column applied, bottom row
 * ignored).  Accumulates in double before casting back to DTYPE. */
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
    out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]);
    out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]);
    out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]);
    return;
}
/* *************************************************************** */
/* Round a double towards negative infinity and return it as an int. */
__device__ __inline__ int cuda_reg_floor(double a)
{
    return static_cast<int>(floor(a));
}
/* *************************************************************** */
/* Cubic spline interpolation weights for fractional offset `ratio`; fills
 * basis[0..3] for taps at integer offsets -1..2.  Intermediate arithmetic is
 * done in double and cast back to FieldTYPE.
 * NOTE(review): appears unused within this chunk — interpCubicSplineKernel
 * below is the double-precision variant the kernels actually call. */
template<class FieldTYPE>
__device__ __inline__ void interpolantCubicSpline(FieldTYPE ratio, FieldTYPE *basis)
{
    // Clamp tiny negatives produced by floating-point rounding.
    if (ratio < 0.0)
        ratio = 0.0; //reg_rounding error
    double FF = (double) ratio * ratio;  // ratio squared
    basis[0] = (FieldTYPE) ((ratio * (((double)2.0 - ratio) * ratio - (double)1.0)) / (double)2.0);
    basis[1] = (FieldTYPE) ((FF * ((double)3.0 * ratio - 5.0) + 2.0) / (double)2.0);
    basis[2] = (FieldTYPE) ((ratio * (((double)4.0 - (double)3.0 * ratio) * ratio + (double)1.0)) / (double)2.0);
    basis[3] = (FieldTYPE) ((ratio - (double)1.0) * FF / (double)2.0);
}
/* *************************************************************** */
/* Initialise a row-major 4x4 matrix (flat array of 16 floats) to identity. */
__device__ __inline__
void reg_mat44_eye(float *mat) {
    // Diagonal entries of a row-major 4x4 matrix sit at indices 0, 5, 10, 15,
    // i.e. exactly the multiples of 5 below 16.
    for (int i = 0; i < 16; ++i)
        mat[i] = (i % 5 == 0) ? 1.f : 0.f;
}
/* *************************************************************** */
/* Fill basis[0..5] with windowed-sinc interpolation weights for fractional
 * offset `relative`, with taps at integer offsets -SINC_KERNEL_RADIUS..
 * SINC_KERNEL_RADIUS-1 around the anchor voxel.  The weights are normalised
 * so they sum to one (constant images are reproduced exactly). */
__inline__ __device__ void interpWindowedSincKernel(double relative, double *basis)
{
    // Clamp tiny negatives produced by floating-point rounding.
    if (relative < 0.0)
        relative = 0.0; //reg_rounding error
    int j = 0;
    double sum = 0.;
    for (int i = -SINC_KERNEL_RADIUS; i < SINC_KERNEL_RADIUS; ++i) {
        double x = relative - (double) (i);  // distance from this tap
        if (x == 0.0)
            basis[j] = 1.0;  // sinc(0) = 1
        // NOTE(review): abs() on a double relies on the C++ overload being in
        // scope; fabs() would be unambiguous — confirm against the build.
        else if (abs(x) >= (double) (SINC_KERNEL_RADIUS))
            basis[j] = 0;
        else {
            // r * sin(pi x) * sin(pi x / r) / (pi x)^2 == sinc(x) * sinc(x / r)
            double pi_x = M_PI * x;
            basis[j] = (SINC_KERNEL_RADIUS) * sin(pi_x) * sin(pi_x / SINC_KERNEL_RADIUS) / (pi_x * pi_x);
        }
        sum += basis[j];
        j++;
    }
    // Normalise so the weights sum to one.
    for (int i = 0; i < SINC_KERNEL_SIZE; ++i)
        basis[i] /= sum;
}
/* *************************************************************** */
/* Cubic spline interpolation weights (double precision) for fractional
 * offset `relative` in [0,1); fills basis[0..3] for taps at offsets -1..2. */
__inline__ __device__ void interpCubicSplineKernel(double relative, double *basis)
{
    // Clamp tiny negatives produced by floating-point rounding.
    if (relative < 0.0)
        relative = 0.0; //reg_rounding error
    double FF = relative * relative;  // relative squared
    basis[0] = (relative * ((2.0 - relative) * relative - 1.0)) / 2.0;
    basis[1] = (FF * (3.0 * relative - 5.0) + 2.0) / 2.0;
    basis[2] = (relative * ((4.0 - 3.0 * relative) * relative + 1.0)) / 2.0;
    basis[3] = (relative - 1.0) * FF / 2.0;
}
/* *************************************************************** */
/* Linear interpolation weights: basis[0] = 1 - relative, basis[1] = relative. */
__inline__ __device__ void interpLinearKernel(double relative, double *basis)
{
    // Clamp tiny negatives produced by floating-point rounding.
    if (relative < 0.0)
        relative = 0.0;
    basis[0] = 1.0 - relative;
    basis[1] = relative;
}
/* *************************************************************** */
/* Nearest-neighbour weights: all mass goes to the closer of the two nodes
 * (the upper one when relative >= 0.5). */
__inline__ __device__ void interpNearestNeighKernel(double relative, double *basis)
{
    // Clamp tiny negatives produced by floating-point rounding.
    if (relative < 0.0)
        relative = 0.0;
    const bool upper = relative >= 0.5;
    basis[0] = upper ? 0.0 : 1.0;
    basis[1] = upper ? 1.0 : 0.0;
}
/* *************************************************************** */
/* Separable 2D interpolation: weighted sum over a kernel_size x kernel_size
 * neighbourhood of `floatingIntensity`, anchored at previous[0..1].
 * Taps that fall outside the image contribute paddingValue instead of data.
 * `zBasis` is unused in the 2D path; it is kept so the 2D and 3D loops share
 * the same call shape. */
__inline__ __device__ double interpLoop2D(float* floatingIntensity,
                                          double* xBasis,
                                          double* yBasis,
                                          double* zBasis,
                                          int *previous,
                                          uint3 fi_xyz,
                                          float paddingValue,
                                          unsigned int kernel_size)
{
    double intensity = (double)(0.0);
    for (int b = 0; b < kernel_size; b++) {
        int Y = previous[1] + b;
        bool yInBounds = -1 < Y && Y < fi_xyz.y;
        double xTempNewValue = 0.0;  // row accumulator, weighted by yBasis below
        for (int a = 0; a < kernel_size; a++) {
            int X = previous[0] + a;
            bool xInBounds = -1 < X && X < fi_xyz.x;
            const unsigned int idx = Y * fi_xyz.x + X;  // row-major 2D index
            // Out-of-bounds taps still consume their basis weight, applied to paddingValue.
            xTempNewValue += (xInBounds && yInBounds) ? floatingIntensity[idx] * xBasis[a] : paddingValue * xBasis[a];
        }
        intensity += xTempNewValue * yBasis[b];
    }
    return intensity;
}
/* *************************************************************** */
/* Separable 3D interpolation: triple-nested weighted sum over a
 * kernel_size^3 neighbourhood anchored at previous[0..2].  Any tap outside
 * the floating image contributes paddingValue instead of image data. */
__inline__ __device__ double interpLoop3D(float* floatingIntensity,
                                          double* xBasis,
                                          double* yBasis,
                                          double* zBasis,
                                          int *previous,
                                          uint3 fi_xyz,
                                          float paddingValue,
                                          unsigned int kernel_size)
{
    double intensity = (double)(0.0);
    for (int c = 0; c < kernel_size; c++) {
        int Z = previous[2] + c;
        bool zInBounds = -1 < Z && Z < fi_xyz.z;
        double yTempNewValue = 0.0;  // slice accumulator, weighted by zBasis below
        for (int b = 0; b < kernel_size; b++) {
            int Y = previous[1] + b;
            bool yInBounds = -1 < Y && Y < fi_xyz.y;
            double xTempNewValue = 0.0;  // row accumulator, weighted by yBasis below
            for (int a = 0; a < kernel_size; a++) {
                int X = previous[0] + a;
                bool xInBounds = -1 < X && X < fi_xyz.x;
                const unsigned int idx = Z * fi_xyz.x * fi_xyz.y + Y * fi_xyz.x + X;  // row-major 3D index
                xTempNewValue += (xInBounds && yInBounds && zInBounds) ? floatingIntensity[idx] * xBasis[a] : paddingValue * xBasis[a];
            }
            yTempNewValue += xTempNewValue * yBasis[b];
        }
        intensity += yTempNewValue * zBasis[c];
    }
    return intensity;
}
/* *************************************************************** */
/* Resample a (possibly multi-component) 2D image along a deformation field.
 * Grid-stride loop over warped voxels: one thread handles several voxels, so
 * any launch configuration covers the whole image.
 * voxelNumber.x = warped voxel count, voxelNumber.y = floating voxel count
 * (see launchResample).  kernelType selects the interpolant:
 * 0 nearest-neighbour, 1 linear, 4 windowed sinc, anything else cubic spline. */
__global__ void ResampleImage2D(float* floatingImage,
                                float* deformationField,
                                float* warpedImage,
                                int *mask,
                                float* sourceIJKMatrix,
                                ulong2 voxelNumber,
                                uint3 fi_xyz,
                                uint2 wi_tu,
                                float paddingValue,
                                int kernelType)
{
    float *sourceIntensityPtr = (floatingImage);
    float *resultIntensityPtr = (warpedImage);
    // Deformation field is structure-of-arrays: all x components, then all y.
    float *deformationFieldPtrX = (deformationField);
    float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
    int *maskPtr = &mask[0];
    long index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < voxelNumber.x) {  // grid-stride loop over warped voxels
        for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {  // nt * nu components
            float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
            float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
            double intensity = paddingValue;
            if (maskPtr[index] > -1) {  // voxel lies inside the region of interest
                int previous[3];
                float world[3], position[3];
                double relative[3];
                world[0] = (float)(deformationFieldPtrX[index]);
                world[1] = (float)(deformationFieldPtrY[index]);
                world[2] = 0.0f;
                // real -> voxel; floating space
                reg_mat44_mul_cuda<float>(sourceIJKMatrix, world, position);
                previous[0] = cuda_reg_floor(position[0]);
                previous[1] = cuda_reg_floor(position[1]);
                // Fractional offset within the voxel, used to build basis weights.
                relative[0] = (double)(position[0]) - (double)(previous[0]);
                relative[1] = (double)(position[1]) - (double)(previous[1]);
                if (kernelType == 0) {
                    // nearest neighbour: 2-tap basis with all weight on one node
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpNearestNeighKernel(relative[0], xBasisIn);
                    interpNearestNeighKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                }
                else if (kernelType == 1) {
                    // linear interpolation
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpLinearKernel(relative[0], xBasisIn);
                    interpLinearKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                }
                else if (kernelType == 4) {
                    // 6-tap windowed sinc: shift the anchor back by the kernel radius
                    double xBasisIn[6], yBasisIn[6], zBasisIn[6];
                    previous[0] -= SINC_KERNEL_RADIUS;
                    previous[1] -= SINC_KERNEL_RADIUS;
                    // NOTE(review): previous[2] is never initialised in this 2D
                    // kernel, so this modifies an indeterminate value.  It looks
                    // harmless because interpLoop2D ignores previous[2] — confirm.
                    previous[2] -= SINC_KERNEL_RADIUS;
                    interpWindowedSincKernel(relative[0], xBasisIn);
                    interpWindowedSincKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 6);
                }
                else {
                    // default: 4-tap cubic spline, anchor one voxel back
                    double xBasisIn[4], yBasisIn[4], zBasisIn[4];
                    previous[0]--;
                    previous[1]--;
                    previous[2]--;  // NOTE(review): same uninitialised-value remark as above
                    interpCubicSplineKernel(relative[0], xBasisIn);
                    interpCubicSplineKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 4);
                }
            }
            // Masked-out voxels keep paddingValue.
            resultIntensity[index] = (float)intensity;
        }
        index += blockDim.x * gridDim.x;
    }
}
/* *************************************************************** */
/* Resample a (possibly multi-component) 3D image along a deformation field.
 * Grid-stride loop over warped voxels.  voxelNumber.x = warped voxel count,
 * voxelNumber.y = floating voxel count (see launchResample).
 * kernelType: 0 nearest-neighbour, 1 linear, 4 windowed sinc, else cubic spline. */
__global__ void ResampleImage3D(float* floatingImage,
                                float* deformationField,
                                float* warpedImage,
                                int *mask,
                                float* sourceIJKMatrix,
                                ulong2 voxelNumber,
                                uint3 fi_xyz,
                                uint2 wi_tu,
                                float paddingValue,
                                int kernelType)
{
    float *sourceIntensityPtr = (floatingImage);
    float *resultIntensityPtr = (warpedImage);
    // Structure-of-arrays deformation field: x plane, then y plane, then z plane.
    float *deformationFieldPtrX = (deformationField);
    float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
    float *deformationFieldPtrZ = &deformationFieldPtrY[voxelNumber.x];
    int *maskPtr = &mask[0];
    long index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < voxelNumber.x) {  // grid-stride loop over warped voxels
        for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {  // nt * nu components
            float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
            float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
            double intensity = paddingValue;
            if (maskPtr[index] > -1) {  // voxel lies inside the region of interest
                int previous[3];
                float world[3], position[3];
                double relative[3];
                world[0] = (float) (deformationFieldPtrX[index]);
                world[1] = (float) (deformationFieldPtrY[index]);
                world[2] = (float) (deformationFieldPtrZ[index]);
                // real -> voxel; floating space
                reg_mat44_mul_cuda<float>(sourceIJKMatrix, world, position);
                previous[0] = cuda_reg_floor(position[0]);
                previous[1] = cuda_reg_floor(position[1]);
                previous[2] = cuda_reg_floor(position[2]);
                // Fractional offset within the voxel, used to build basis weights.
                relative[0] = (double)(position[0]) - (double)(previous[0]);
                relative[1] = (double)(position[1]) - (double)(previous[1]);
                relative[2] = (double)(position[2]) - (double)(previous[2]);
                if (kernelType == 0) {
                    // nearest neighbour
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpNearestNeighKernel(relative[0], xBasisIn);
                    interpNearestNeighKernel(relative[1], yBasisIn);
                    interpNearestNeighKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                } else if (kernelType == 1) {
                    // linear interpolation
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpLinearKernel(relative[0], xBasisIn);
                    interpLinearKernel(relative[1], yBasisIn);
                    interpLinearKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                } else if (kernelType == 4) {
                    // 6-tap windowed sinc: shift the anchor back by the kernel radius
                    double xBasisIn[6], yBasisIn[6], zBasisIn[6];
                    previous[0] -= SINC_KERNEL_RADIUS;
                    previous[1] -= SINC_KERNEL_RADIUS;
                    previous[2] -= SINC_KERNEL_RADIUS;
                    interpWindowedSincKernel(relative[0], xBasisIn);
                    interpWindowedSincKernel(relative[1], yBasisIn);
                    interpWindowedSincKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 6);
                } else {
                    // default: 4-tap cubic spline, anchor one voxel back
                    double xBasisIn[4], yBasisIn[4], zBasisIn[4];
                    previous[0]--;
                    previous[1]--;
                    previous[2]--;
                    interpCubicSplineKernel(relative[0], xBasisIn);
                    interpCubicSplineKernel(relative[1], yBasisIn);
                    interpCubicSplineKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 4);
                }
            }
            resultIntensity[index] = (float)intensity;  // masked voxels keep paddingValue
        }
        index += blockDim.x * gridDim.x;
    }
}
/* *************************************************************** */
/* Host-side entry point (CUDA build): configures and launches the 2D or 3D
 * resampling kernel on the already-uploaded device buffers.
 * - interp selects the interpolant (0 NN, 1 linear, 4 windowed sinc, else cubic);
 * - paddingValue fills voxels that map outside the floating image;
 * - DTI-aware resampling (dti_timepoint / jacMat) is not implemented on GPU
 *   and aborts via reg_exit().
 * All *_d arguments are device pointers passed by address. */
void launchResample(nifti_image *floatingImage,
                    nifti_image *warpedImage,
                    int interp,
                    float paddingValue,
                    bool *dti_timepoint,
                    mat33 *jacMat,
                    float **floatingImage_d,
                    float **warpedImage_d,
                    float **deformationFieldImage_d,
                    int **mask_d,
                    float **sourceIJKMatrix_d) {
    // DTI resampling is not supported on the GPU path.
    if (dti_timepoint != NULL || jacMat != NULL) {
        reg_print_fct_error("launchResample");
        reg_print_msg_error("The DTI resampling has not yet been implemented with the CUDA platform. Exit.");
        reg_exit();
    }
    long targetVoxelNumber = (long) warpedImage->nx * warpedImage->ny * warpedImage->nz;
    unsigned int maxThreads = 512;
    // Upper bound on the 1D grid size (65535 on legacy hardware).  The kernels
    // iterate with a grid-stride loop, so capping the block count never loses
    // voxels.  The previous value, 65365, was a typo for 65535.
    unsigned int maxBlocks = 65535;
    // Ceiling division of the voxel count by the block size.
    unsigned int blocks = (targetVoxelNumber % maxThreads) ? (targetVoxelNumber / maxThreads) + 1 : targetVoxelNumber / maxThreads;
    blocks = min1(blocks, maxBlocks);
    dim3 mygrid(blocks, 1, 1);
    dim3 myblocks(maxThreads, 1, 1);
    // (An unused cudaGetDeviceProperties query was removed here.)
    // x: warped-image voxel count, y: floating-image voxel count.
    ulong2 voxelNumber = make_ulong2(warpedImage->nx * warpedImage->ny * warpedImage->nz, floatingImage->nx * floatingImage->ny * floatingImage->nz);
    uint3 fi_xyz = make_uint3(floatingImage->nx, floatingImage->ny, floatingImage->nz);
    uint2 wi_tu = make_uint2(warpedImage->nt, warpedImage->nu);
    if (floatingImage->nz > 1) {
        ResampleImage3D<<<mygrid, myblocks>>>(*floatingImage_d,
                                              *deformationFieldImage_d,
                                              *warpedImage_d,
                                              *mask_d,
                                              *sourceIJKMatrix_d,
                                              voxelNumber,
                                              fi_xyz,
                                              wi_tu,
                                              paddingValue,
                                              interp);
    }
    else {
        ResampleImage2D<<<mygrid, myblocks>>>(*floatingImage_d,
                                              *deformationFieldImage_d,
                                              *warpedImage_d,
                                              *mask_d,
                                              *sourceIJKMatrix_d,
                                              voxelNumber,
                                              fi_xyz,
                                              wi_tu,
                                              paddingValue,
                                              interp);
    }
#ifndef NDEBUG
    NR_CUDA_CHECK_KERNEL(mygrid, myblocks)
#else
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // drop-in replacement.
    NR_CUDA_SAFE_CALL(cudaDeviceSynchronize());
#endif
}
/* *************************************************************** */
/* Upload a 4x4 identity matrix to the constant-memory symbol cIdentity. */
void identityConst()
{
    float* mat_h = (float*) malloc(16 * sizeof(float));
    mat44 *final = new mat44();
    // Set the current transformation to identity
    final->m[0][0] = final->m[1][1] = final->m[2][2] = final->m[3][3] = 1.0f;
    final->m[0][1] = final->m[0][2] = final->m[0][3] = 0.0f;
    final->m[1][0] = final->m[1][2] = final->m[1][3] = 0.0f;
    final->m[2][0] = final->m[2][1] = final->m[2][3] = 0.0f;
    final->m[3][0] = final->m[3][1] = final->m[3][2] = 0.0f;
    mat44ToCptr(*final, mat_h);
    // Copy the 16 floats themselves.  The previous code passed &mat_h (the
    // address of the host pointer), which uploaded the pointer value plus
    // adjacent stack bytes instead of the matrix.
    cudaMemcpyToSymbol(cIdentity, mat_h, 16 * sizeof(float));
    // Release host-side temporaries (previously leaked).
    free(mat_h);
    delete final;
}
/* *************************************************************** */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.