serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
8,201 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#define MAX_DATA_SIZE (1 << 27)
typedef unsigned char byte;
const byte sbox[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
__device__ __constant__
byte d_sbox[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
__device__ __constant__
byte gmul2[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
};
__device__ __constant__
byte gmul3[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a
};
const byte rcon[11] = {
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
};
// Expands a 16-byte AES-128 cipher key into the 11 round keys (176 bytes)
// used by the encrypt kernel.
// roundKeys: output buffer of 11*16 bytes; key: 16-byte cipher key.
void keyExpansion(byte *roundKeys, byte *key) {
    memcpy(roundKeys, key, 16);      // round-0 key is the cipher key itself
    // Generate the remaining 40 schedule words (words 4..43).
    for (int w = 4; w < 44; ++w) {
        byte t[4];
        const byte *prev = &roundKeys[(w - 1) * 4];
        if (w % 4 == 0) {
            // RotWord + SubWord, then XOR the round constant into byte 0.
            t[0] = sbox[prev[1]] ^ rcon[w / 4];
            t[1] = sbox[prev[2]];
            t[2] = sbox[prev[3]];
            t[3] = sbox[prev[0]];
        } else {
            t[0] = prev[0];
            t[1] = prev[1];
            t[2] = prev[2];
            t[3] = prev[3];
        }
        // word w = word (w-4) XOR t
        for (int b = 0; b < 4; ++b) {
            roundKeys[w * 4 + b] = roundKeys[(w - 4) * 4 + b] ^ t[b];
        }
    }
}
__device__
// AddRoundKey step: XORs the 16 bytes of round key `round` (taken from the
// expanded schedule) into the state, one byte at a time.
void addRoundKey(byte state[4][4], byte *roundKeys, int round) {
    const byte *rk = roundKeys + round * 16;
    for (int idx = 0; idx < 16; ++idx) {
        state[idx / 4][idx % 4] ^= rk[idx];
    }
}
__device__
// SubBytes step: replaces every state byte with its S-box image, using the
// constant-memory copy of the table.
void subBytes(byte state[4][4]) {
    byte *cell = &state[0][0];
    for (int idx = 0; idx < 16; ++idx) {
        cell[idx] = d_sbox[cell[idx]];
    }
}
__device__
// ShiftRows step. The state is laid out as state[i][j] with i indexing the
// AES column (word) and j the byte within it -- the same i*4+j order
// addRoundKey uses -- so rotating along the first index for a fixed j
// rotates "row" j of the cipher state left by j positions.
void shiftRows(byte state[4][4]) {
byte t;
// row 1: rotate left by one
t = state[0][1];
state[0][1] = state[1][1];
state[1][1] = state[2][1];
state[2][1] = state[3][1];
state[3][1] = t;
// row 2: rotate left by two (two independent swaps)
t = state[0][2];
state[0][2] = state[2][2];
state[2][2] = t;
t = state[1][2];
state[1][2] = state[3][2];
state[3][2] = t;
// row 3: rotate left by three (equivalently right by one)
t = state[0][3];
state[0][3] = state[3][3];
state[3][3] = state[2][3];
state[2][3] = state[1][3];
state[1][3] = t;
}
__device__
// MixColumns step: multiplies each state column by the fixed AES matrix
// {{2,3,1,1},{1,2,3,1},{1,1,2,3},{3,1,1,2}} over GF(2^8). The x2 and x3
// multiplications come from the precomputed constant-memory tables gmul2
// and gmul3; multiplication by 1 is the byte itself.
void mixColumns(byte state[4][4]) {
byte t[4];
for (int i = 0; i < 4; ++i) {
t[0] = gmul2[state[i][0]] ^ gmul3[state[i][1]] ^ state[i][2] ^ state[i][3];
t[1] = state[i][0] ^ gmul2[state[i][1]] ^ gmul3[state[i][2]] ^ state[i][3];
t[2] = state[i][0] ^ state[i][1] ^ gmul2[state[i][2]] ^ gmul3[state[i][3]];
t[3] = gmul3[state[i][0]] ^ state[i][1] ^ state[i][2] ^ gmul2[state[i][3]];
// write the mixed column back
for (int j = 0; j < 4; ++j) {
state[i][j] = t[j];
}
}
}
__global__
// AES-128 ECB encryption kernel: each thread encrypts one independent
// 16-byte block in place.
// data: n*16 bytes, encrypted in place; roundKeys: 176-byte expanded
// schedule; n: number of 16-byte blocks.
void encrypt(byte *data, byte *roundKeys, int n) {
    int block = blockIdx.x * blockDim.x + threadIdx.x;
    if (block >= n) return;   // grid may be larger than the data

    byte state[4][4];
    byte *src = data + block * 16;
    for (int b = 0; b < 16; ++b) {
        state[b / 4][b % 4] = src[b];
    }

    addRoundKey(state, roundKeys, 0);
    for (int r = 1; r <= 9; ++r) {
        subBytes(state);
        shiftRows(state);
        mixColumns(state);
        addRoundKey(state, roundKeys, r);
    }
    // Final round omits MixColumns.
    subBytes(state);
    shiftRows(state);
    addRoundKey(state, roundKeys, 10);

    for (int b = 0; b < 16; ++b) {
        src[b] = state[b / 4][b % 4];
    }
}
// Zero-pads `data` in place so its length becomes a multiple of the 16-byte
// AES block size, updating *len accordingly. Prints the new size only when
// padding was actually added.
//
// Precondition: the buffer behind `data` must have capacity for *len rounded
// up to the next multiple of 16. main() allocates MAX_DATA_SIZE (itself a
// multiple of 16), so this always holds there.
//
// BUG FIX: the previous version realloc'ed and assigned the result only to
// the local `data` parameter -- if realloc moved the block, the caller kept a
// dangling pointer (and later freed the wrong one). Padding in place removes
// the realloc without changing the signature.
void padData(byte *data, int *len) {
    int dataLength = *len;
    int rem = dataLength % 16;
    if (rem > 0) {
        int padSize = 16 - rem;
        memset(data + dataLength, 0, padSize);
        *len = dataLength + padSize;
        printf("Added padding to data\nData size: %d\n", *len);
    }
}
// Encrypts `dataLength` bytes (must be a multiple of 16; see padData) in
// place with AES-128 ECB on the GPU and prints the kernel time in ms.
void aes128(byte *data, byte *key, int dataLength) {
    byte roundKeys[11 * 16];
    byte *dData, *dRoundKeys;
    float timeElapsed = 0.0;
    int numBlocks16 = dataLength / 16;   // one thread per 16-byte AES block
    keyExpansion(roundKeys, key);
    cudaMalloc((void **) &dData, sizeof(byte) * dataLength);
    cudaMalloc((void **) &dRoundKeys, sizeof(byte) * 11 * 16);
    cudaMemcpy(dData, data, sizeof(byte) * dataLength, cudaMemcpyHostToDevice);
    cudaMemcpy(dRoundKeys, roundKeys, sizeof(byte) * 11 * 16, cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // BUG FIX: size the grid to the data. The old fixed 1<<15 x 1<<10 launch
    // always spawned 2^25 threads regardless of input size and would silently
    // skip blocks beyond 2^25.
    dim3 nThreads(256);
    dim3 nBlocks((numBlocks16 + 255) / 256);
    cudaEventRecord(start, 0);
    if (numBlocks16 > 0) {   // a zero-dimension grid would be a launch error
        encrypt<<<nBlocks, nThreads>>>(dData, dRoundKeys, numBlocks16);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeElapsed, start, stop);
    cudaMemcpy(data, dData, dataLength, cudaMemcpyDeviceToHost);
    printf("%lf\n", timeElapsed);
    cudaEventDestroy(start);   // previously leaked
    cudaEventDestroy(stop);
    cudaFree(dData);
    cudaFree(dRoundKeys);
}
// Reads a 16-byte key from key.txt and the plaintext from payload.txt,
// encrypts in place with AES-128 on the GPU, and writes the ciphertext to
// outcuda.txt. Adds NULL/short-read checks that were previously missing.
int main() {
    byte key[16];
    byte *data = (byte *) malloc(sizeof(byte) * MAX_DATA_SIZE);
    int dataLength;
    if (data == NULL) {
        fprintf(stderr, "Failed to allocate %d bytes\n", MAX_DATA_SIZE);
        return EXIT_FAILURE;
    }
    FILE *fKey = fopen("key.txt", "rb");
    if (fKey == NULL) {
        fprintf(stderr, "Cannot open key.txt\n");
        free(data);
        return EXIT_FAILURE;
    }
    if (fread(key, 1, 16, fKey) != 16) {
        fprintf(stderr, "key.txt must contain at least 16 bytes\n");
        fclose(fKey);
        free(data);
        return EXIT_FAILURE;
    }
    fclose(fKey);
    FILE *fData = fopen("payload.txt", "rb");
    if (fData == NULL) {
        fprintf(stderr, "Cannot open payload.txt\n");
        free(data);
        return EXIT_FAILURE;
    }
    dataLength = fread(data, 1, MAX_DATA_SIZE, fData);
    fclose(fData);
    printf("Data size: %d\n", dataLength);
    padData(data, &dataLength);
    printf("Number of blocks: %d\n", dataLength / 16);
    aes128(data, key, dataLength);
    FILE *f_out = fopen("outcuda.txt", "wb");
    if (f_out == NULL) {
        fprintf(stderr, "Cannot open outcuda.txt for writing\n");
        free(data);
        return EXIT_FAILURE;
    }
    fwrite(data, 1, dataLength, f_out);
    fclose(f_out);
    free(data);
    return 0;
}
8,202 | #include <stdio.h>
#include <stdlib.h>
#define ONE_MB 1024*1024
#define SIXTEEN_MB 16*1024*1024
#define FOUR_MB 4*1024*1024
// Empty kernel used by TASK 1 in main() to time bare launch overhead.
__global__ void testKernel1 (){
}
// Copies array1 into resultArray, one element per thread. No bounds guard:
// the launch must cover exactly the element count. (Both call sites in
// main() are currently commented out.)
__global__ void testKernel2 (int *array1, int *resultArray){
int index = threadIdx.x + blockIdx.x*blockDim.x;
resultArray[index] = array1[index];
}
// Scatters c_array into transpose_array as a width x width transpose:
// element (r, c) lands at (c, r). index/width is already integer division,
// so the (int) cast is redundant. No bounds guard.
// NOTE(review): main() launches 4096*1024 = 4M threads with width = 1024;
// a 4M-element matrix is 2048 x 2048, so output indices collide and only
// reach ~1M slots -- confirm width matches the intended data shape.
__global__ void testKernel3(int *c_array, int *transpose_array, int width){
int index = threadIdx.x + blockIdx.x * blockDim.x;
transpose_array[((int)(index/width) + ((index%width)*(width)))] = c_array[index];
}
// Micro-benchmark driver: six tasks timing (1) empty-kernel launch overhead,
// (2) a 1 MB host-to-device copy, (3) a 1 MB device-to-host copy, (4) a
// 16 MB round trip, (5) async transfers on two streams with pinned memory,
// and (6) a naive transpose kernel. Only cursory error checking (one
// cudaGetLastError at the end).
int main() {
float time_in_ms = 0.0f;
cudaEvent_t start, stop;
cudaError_t err=cudaSuccess;
//===========TASK 1============================
// Time a 1x1 launch of the empty kernel: measures launch overhead only.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
testKernel1<<<1,1>>>();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop); //Wait till the event is executed.
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("Time for empty kernel to exexute:%fms\n\n",time_in_ms);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//========TASK 1 end============================
//=======TASK 2=================================
// Time a 1 MB pageable host-to-device copy.
int *array1 = (int *) malloc(ONE_MB);
int *c_array;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for (int i = 0; i < ONE_MB/sizeof(int); i++) {
array1[i] = i;
}
cudaMalloc((void **)&c_array, ONE_MB);
cudaEventRecord(start,0);
cudaMemcpy(c_array, array1, ONE_MB, cudaMemcpyHostToDevice);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("Time for memory copy of %d bytes is :%fms\n\n",ONE_MB,time_in_ms);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(c_array);
//==============TASK 2 end======================
//=============TASK 3===========================
// Time a 1 MB device-to-host copy. NOTE(review): the kernel that would fill
// c_arrayCopy is commented out, so the bytes copied back are uninitialized
// device memory -- the timing is still valid, the contents are not.
int *arrayCopy = (int *) malloc(ONE_MB),*c_arrayCopy;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc((void **)&c_array, ONE_MB);
cudaMalloc((void **)&c_arrayCopy, ONE_MB);
cudaMemcpy(c_array, array1, ONE_MB, cudaMemcpyHostToDevice);
//testKernel2<<<256,1024>>>(c_array,c_arrayCopy);
cudaEventRecord(start,0);
cudaMemcpy(arrayCopy, c_arrayCopy, ONE_MB, cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
time_in_ms = 0.0f;
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("Time to copy %d bytes from device to host = %fms\n",ONE_MB,time_in_ms);
printf("Effective bandwidth = %f GB/s \n\n", ONE_MB/time_in_ms/1e6);
cudaFree(c_array);
cudaFree(c_arrayCopy);
free(array1);
free(arrayCopy);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//============TASK 3 end==================================
//============TASK 4====================================
// Time a 16 MB H2D copy plus a 16 MB D2H copy (the interval covers both).
// Same caveat as TASK 3: the copy kernel is commented out.
arrayCopy = (int *) malloc(SIXTEEN_MB);
array1 = (int *) malloc(SIXTEEN_MB);
for (int i = 0; i < (SIXTEEN_MB)/sizeof(int); i++) {
array1[i] = i;
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc((void **)&c_array, 16*ONE_MB);
cudaMalloc((void **)&c_arrayCopy, 16*ONE_MB);
cudaEventRecord(start,0);
cudaMemcpy(c_array, array1, SIXTEEN_MB, cudaMemcpyHostToDevice);
//testKernel2<<<4096,1024>>>(c_array,c_arrayCopy);
cudaMemcpy(arrayCopy, c_arrayCopy, SIXTEEN_MB, cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
time_in_ms = 0.0f;
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("Time to copy %d bytes from device to host = %fms\n",SIXTEEN_MB,time_in_ms);
printf("Effective bandwidth = %f GB/s \n\n", (SIXTEEN_MB)/time_in_ms/1e6);
cudaFree(c_array);
cudaFree(c_arrayCopy);
free(array1);
free(arrayCopy);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//===================TASK 4 end========================
//===================TASK 5============================
// Async transfers on two streams using pinned (cudaMallocHost) buffers;
// array1 is re-pointed at pinned memory here (the pageable buffer was freed
// above). NOTE(review): the two streams are created but never destroyed.
int *c_array1,*c_array2, number_of_steams = 2;
int *resultArray1, *resultArray2;
cudaMallocHost((void **)&resultArray1, SIXTEEN_MB);
cudaMallocHost((void **)&resultArray2, SIXTEEN_MB);
cudaMallocHost((void **)&array1, SIXTEEN_MB);
cudaMalloc((void **)&c_array1, SIXTEEN_MB);
cudaMalloc((void **)&c_array2, SIXTEEN_MB);
for (int i = 0; i < (SIXTEEN_MB)/sizeof(int); i++) {
array1[i] = i;
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t stream[number_of_steams];
for (int i = 0; i < number_of_steams; i++) {
cudaStreamCreate(&stream[i]);
}
cudaEventRecord(start,0);
//for(int i=0;i<number_of_steams;i++){
cudaMemcpyAsync(c_array1,array1,SIXTEEN_MB,cudaMemcpyHostToDevice,stream[0]);
cudaMemcpyAsync(c_array2,array1,SIXTEEN_MB,cudaMemcpyHostToDevice,stream[1]);
//}
cudaMemcpyAsync(resultArray1,c_array1,SIXTEEN_MB,cudaMemcpyDeviceToHost,stream[0]);
cudaMemcpyAsync(resultArray2,c_array2,SIXTEEN_MB,cudaMemcpyDeviceToHost,stream[1]);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
time_in_ms = 0.0;
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("Time for async data transfer of 2 arrays each of %d bytes multiple arrays is = %fms\n",SIXTEEN_MB, time_in_ms);
printf("Bandwidth for async data transfer of 2 arrays is : %f \n\n",(SIXTEEN_MB)/time_in_ms/1e6);
cudaFree(c_array1);
cudaFree(c_array2);
cudaFreeHost(resultArray1);
cudaFreeHost(resultArray2);
cudaFreeHost(array1);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//==================TASK 5 end=========================
//=================TASK 6================================
// Time the naive transpose kernel over 4M ints. NOTE(review): width = 1024
// does not match a 4M-element square matrix (2048 x 2048), and the printed
// value is a bandwidth although the label says GFLOPS.
cudaEventCreate(&start);
cudaEventCreate(&stop);
int *array4mb = (int *) malloc(FOUR_MB*sizeof(int));
int width = 1024;
for(int i=0; i<FOUR_MB; i++){
array4mb[i] = i;
}
int *cuda_array_4mb, *transpose_4MB;
cudaMalloc((void **)&cuda_array_4mb,FOUR_MB*(sizeof(int)));
cudaMalloc((void **)&transpose_4MB,FOUR_MB*(sizeof(int)));
cudaMemcpy(cuda_array_4mb, array4mb, FOUR_MB*(sizeof(int)), cudaMemcpyHostToDevice);
cudaEventRecord(start,0);
testKernel3<<<4096,1024>>>(cuda_array_4mb, transpose_4MB,width);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(array4mb, transpose_4MB, FOUR_MB*(sizeof(int)), cudaMemcpyDeviceToHost);
time_in_ms = 0.0;
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("Bandwidth for array of dimension %d is %fGFLOPS\n\n",FOUR_MB,(FOUR_MB)/time_in_ms/1e6);
//=================Task 6 end============================
// Single error check for everything above; an earlier sticky error would be
// reported here.
err=cudaGetLastError();
if(err!=cudaSuccess) {
fprintf(stderr,"Error executing the kernel - %s\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Report device properties and the theoretical peak bandwidth (DDR: 2
// transfers per clock, bus width in bytes).
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties,0);
printf("Device name: %s\n", deviceProperties.name);
printf("Memory Clock Rate (KHz): %d\n",deviceProperties.memoryClockRate);
printf("Memory Bus Width (bits): %d\n",deviceProperties.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n",
2.0*deviceProperties.memoryClockRate*(deviceProperties.memoryBusWidth/8)/1.0e6);
printf("Registers per multiprocessors: %d\n\n",deviceProperties.regsPerMultiprocessor);
}
|
8,203 | #include <stdio.h>
#include <cuda.h>
// Prints the launch configuration once (from thread (0,0,0) of block
// (0,0,0)) and atomically counts every launched thread into *nthreads.
__global__ void dkernel(unsigned *nthreads) {
if (threadIdx.x == 0 && blockIdx.x == 0 && threadIdx.y == 0 && blockIdx.y == 0 && threadIdx.z == 0 && blockIdx.z == 0) {
//if (threadIdx.x == 0) {
printf("%d %d %d %d %d %d.\n", gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z);
}
// atomicInc wraps to 0 past 1,000,000; the launch in main() (2*3*4 blocks x
// 5*6*7 threads = 5040 threads) stays well below that.
atomicInc(nthreads, 1000000);
}
// Launches dkernel on a 3-D grid/block configuration and prints the total
// thread count (2*3*4 blocks x 5*6*7 threads = 5040). The blocking
// cudaMemcpy after the launch also serves as the synchronization point.
int main() {
    dim3 grid(2, 3, 4);
    dim3 block(5, 6, 7);
    unsigned *nthreads, hnthreads = 0;
    cudaMalloc(&nthreads, sizeof(unsigned));
    cudaMemcpy(nthreads, &hnthreads, sizeof(unsigned), cudaMemcpyHostToDevice);
    dkernel<<<grid, block>>>(nthreads);
    cudaMemcpy(&hnthreads, nthreads, sizeof(unsigned), cudaMemcpyDeviceToHost);
    printf("%d\n", hnthreads);
    cudaFree(nthreads);   // BUG FIX: device allocation was previously leaked
    return 0;
}
|
8,204 | #include <stdio.h>
// Number of vector elements.
int N = 4096;
// Element-wise vector add using one thread per block: C[b] = A[b] + B[b]
// where b is the block index. Requires a launch of exactly N blocks x 1
// thread (see main); there is no bounds guard.
__global__ void vadd(float* A, float* B, float* C){
C[blockIdx.x] = A[blockIdx.x] + B[blockIdx.x];
}
// Fills two N-element vectors, adds them on the GPU (one block per element),
// and verifies the result against the host-side sum.
int main(){
    float *h_A = new float[N];
    float *h_B = new float[N];
    float *h_C = new float[N];
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, N*sizeof(float));
    cudaMalloc((void **)&d_B, N*sizeof(float));
    cudaMalloc((void **)&d_C, N*sizeof(float));
    // Both inputs hold 0..N-1.
    for(int i = 0; i < N; i++){
        h_A[i] = i;
        h_B[i] = i;
    }
    cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, N*sizeof(float), cudaMemcpyHostToDevice);
    // One thread per block; vadd indexes by blockIdx.x only.
    vadd<<<N,1>>>(d_A, d_B, d_C);
    cudaMemcpy(h_C, d_C, N*sizeof(float), cudaMemcpyDeviceToHost);
    // Host-side verification against the exact float sum.
    bool ok = true;
    for(int i = 0; i < N && ok; i++){
        ok = (h_C[i] == h_A[i] + h_B[i]);
    }
    printf(ok ? "Correct\n" : "Incorrect\n");
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    return 0;
}
|
8,205 | /*
Modified from
https://github.com/sshaoshuai/PCDet/blob/master/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d_kernel.cu
Point cloud feature pooling
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Rotates (shift_x, shift_y) by -rz, writing the box-frame coordinates into
// local_x/local_y.
__device__ inline void lidar_to_local_coords(float shift_x, float shift_y,
                                             float rz, float &local_x,
                                             float &local_y) {
  // Use the single-precision cosf/sinf: the previous cos/sin promoted the
  // float argument to double on the device for no accuracy benefit here.
  float cosa = cosf(-rz), sina = sinf(-rz);
  local_x = shift_x * cosa + shift_y * (-sina);
  local_y = shift_x * sina + shift_y * cosa;
}
// Returns 1 if pt lies inside box3d, 0 otherwise; also writes the point's
// box-frame x/y into local_x/local_y as a side effect.
__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d,
                                        float &local_x, float &local_y) {
  // param pt: (x, y, z)
  // param box3d: (cx, cy, cz, dx, dy, dz, rz) in LiDAR coordinate, cz in the
  // bottom center
  float x = pt[0], y = pt[1], z = pt[2];
  float cx = box3d[0], cy = box3d[1], cz = box3d[2];
  float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6];
  cz += dz / 2.0f;  // shift to the center since cz in box3d is the bottom center
  if (fabsf(z - cz) > dz / 2.0f) return 0;
  lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y);
  // Cleanup: the flag was previously built with bitwise '&' and round-tripped
  // through a float; use short-circuit '&&' into an int, and float literals
  // to avoid double promotion.
  int in_flag = (local_x > -dx / 2.0f) && (local_x < dx / 2.0f) &&
                (local_y > -dy / 2.0f) && (local_y < dy / 2.0f);
  return in_flag;
}
// One thread per (point, box, batch) triple: stores the 0/1 in-box flag for
// point pt_idx of batch bs_idx against box box_idx.
// Launch: grid(ceil(N/threads), M, B) x block(threads) -- see roipool3dLauncher.
__global__ void assign_pts_to_box3d(int batch_size, int pts_num, int boxes_num, const float *xyz, const float *boxes3d, int *pts_assign){
// params xyz: (B, N, 3)
// params boxes3d: (B, M, 7)
// params pts_assign: (B, N, M): idx of the corresponding box3d, -1 means background points
// (note: this version actually writes the 0/1 in-box flag, not a box index,
// and never writes -1 -- the comment above predates the code)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
int box_idx = blockIdx.y;
int bs_idx = blockIdx.z;
if (pt_idx >= pts_num || box_idx >= boxes_num || bs_idx >= batch_size){
return;
}
int assign_idx = bs_idx * pts_num * boxes_num + pt_idx * boxes_num + box_idx;
pts_assign[assign_idx] = 0;
int box_offset = bs_idx * boxes_num * 7 + box_idx * 7;
int pt_offset = bs_idx * pts_num * 3 + pt_idx * 3;
// local_x/local_y are outputs of the in-box test; unused here.
float local_x = 0, local_y = 0;
int cur_in_flag = check_pt_in_box3d(xyz + pt_offset, boxes3d + box_offset, local_x, local_y);
pts_assign[assign_idx] = cur_in_flag;
// printf("bs=%d, pt=%d, in=%d\n", bs_idx, pt_idx, pts_assign[bs_idx * pts_num + pt_idx]);
}
// One thread per (box, batch): sequentially scans all N points, collecting up
// to sampled_pts_num indices of points assigned to the box. Boxes with fewer
// points repeat their collected indices cyclically; boxes with none set
// pooled_empty_flag. The flag is only ever set to 1 here -- presumably the
// caller zero-initializes it, TODO confirm.
__global__ void get_pooled_idx(int batch_size, int pts_num, int boxes_num, int sampled_pts_num,
const int *pts_assign, int *pts_idx, int *pooled_empty_flag){
// params xyz: (B, N, 3)
// params pts_feature: (B, N, C)
// params pts_assign: (B, N)
// params pts_idx: (B, M, 512)
// params pooled_empty_flag: (B, M)
int boxes_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (boxes_idx >= boxes_num){
return;
}
int bs_idx = blockIdx.y;
int cnt = 0;
// Collect the first sampled_pts_num in-box point indices.
for (int k = 0; k < pts_num; k++){
if (pts_assign[bs_idx * pts_num * boxes_num + k * boxes_num + boxes_idx]){
if (cnt < sampled_pts_num){
pts_idx[bs_idx * boxes_num * sampled_pts_num + boxes_idx * sampled_pts_num + cnt] = k;
cnt++;
}
else break;
}
}
if (cnt == 0){
// Empty box: mark it; pts_idx for this box is left unwritten (the forward
// kernel skips flagged boxes).
pooled_empty_flag[bs_idx * boxes_num + boxes_idx] = 1;
}
else if (cnt < sampled_pts_num){
// duplicate same points for sampling
for (int k = cnt; k < sampled_pts_num; k++){
int duplicate_idx = k % cnt;
int base_offset = bs_idx * boxes_num * sampled_pts_num + boxes_idx * sampled_pts_num;
pts_idx[base_offset + k] = pts_idx[base_offset + duplicate_idx];
}
}
}
// One thread per (sample slot, box, batch): gathers the xyz coordinates and
// the C-dimensional feature of the selected point into
// pooled_features[b, m, s, 0:3+C]. Boxes flagged empty are skipped, leaving
// their output untouched.
__global__ void roipool3d_forward(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num,
const float *xyz, const int *pts_idx, const float *pts_feature,
float *pooled_features, int *pooled_empty_flag){
// params xyz: (B, N, 3)
// params pts_idx: (B, M, 512)
// params pts_feature: (B, N, C)
// params pooled_features: (B, M, 512, 3+C)
// params pooled_empty_flag: (B, M)
int sample_pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
int box_idx = blockIdx.y;
int bs_idx = blockIdx.z;
if (sample_pt_idx >= sampled_pts_num || box_idx >= boxes_num || bs_idx >= batch_size){
return;
}
if (pooled_empty_flag[bs_idx * boxes_num + box_idx]){
return;
}
int temp_idx = bs_idx * boxes_num * sampled_pts_num + box_idx * sampled_pts_num + sample_pt_idx;
int src_pt_idx = pts_idx[temp_idx];
int dst_feature_offset = temp_idx * (3 + feature_in_len);
// First 3 channels: the point's coordinates.
for (int j = 0; j < 3; j++)
pooled_features[dst_feature_offset + j] = xyz[bs_idx * pts_num * 3 + src_pt_idx * 3 + j];
// Remaining C channels: the point's feature vector.
int src_feature_offset = bs_idx * pts_num * feature_in_len + src_pt_idx * feature_in_len;
for (int j = 0; j < feature_in_len; j++)
pooled_features[dst_feature_offset + 3 + j] = pts_feature[src_feature_offset + j];
}
// Host driver: assigns points to boxes, selects up to sampled_pts_num point
// indices per box, then gathers coordinates+features into pooled_features.
// pooled_empty_flag is only set (never cleared) by the kernels -- presumably
// the caller zero-initializes it, TODO confirm. No cudaError checking, and
// the two temp buffers are cudaMalloc'ed/cudaFree'd on every call.
void roipool3dLauncher(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num,
const float *xyz, const float *boxes3d, const float *pts_feature, float *pooled_features, int *pooled_empty_flag){
// printf("batch_size=%d, pts_num=%d, boxes_num=%d\n", batch_size, pts_num, boxes_num);
int *pts_assign = NULL;
cudaMalloc(&pts_assign, batch_size * pts_num * boxes_num * sizeof(int)); // (batch_size, N, M)
// cudaMemset(&pts_assign, -1, batch_size * pts_num * boxes_num * sizeof(int));
dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num, batch_size); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
assign_pts_to_box3d<<<blocks, threads>>>(batch_size, pts_num, boxes_num, xyz, boxes3d, pts_assign);
int *pts_idx = NULL;
cudaMalloc(&pts_idx, batch_size * boxes_num * sampled_pts_num * sizeof(int)); // (batch_size, M, sampled_pts_num)
dim3 blocks2(DIVUP(boxes_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row)
get_pooled_idx<<<blocks2, threads>>>(batch_size, pts_num, boxes_num, sampled_pts_num, pts_assign, pts_idx, pooled_empty_flag);
dim3 blocks_pool(DIVUP(sampled_pts_num, THREADS_PER_BLOCK), boxes_num, batch_size);
roipool3d_forward<<<blocks_pool, threads>>>(batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num,
xyz, pts_idx, pts_feature, pooled_features, pooled_empty_flag);
cudaFree(pts_assign);
cudaFree(pts_idx);
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
|
8,206 | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "common.h"
// 7-point 3-D stencil step: Anext = c1 * (sum of 6 face neighbors) -
// c0 * center. Index3D comes from common.h. One thread per x position, one
// block per interior (j, k) -- presumably a launch of grid(ny-2, nz-2) x
// block(nx-ish), TODO confirm against the host code (not visible here).
__global__ void naive_kernel(float c0,float c1,float *A0,float *Anext, int nx, int ny, int nz)
{
int i = threadIdx.x;
int j = blockIdx.x+1;
int k = blockIdx.y+1;
// NOTE(review): the guard excludes i == 0 only; i+1 is read unguarded, so
// the i == nx-1 boundary must be excluded by the launch configuration --
// verify.
if(i>0)
{
Anext[Index3D (nx, ny, i, j, k)] =
(A0[Index3D (nx, ny, i, j, k + 1)] +
A0[Index3D (nx, ny, i, j, k - 1)] +
A0[Index3D (nx, ny, i, j + 1, k)] +
A0[Index3D (nx, ny, i, j - 1, k)] +
A0[Index3D (nx, ny, i + 1, j, k)] +
A0[Index3D (nx, ny, i - 1, j, k)])*c1
- A0[Index3D (nx, ny, i, j, k)]*c0;
}
}
|
8,207 | #include "includes.h"
// For every character position i in `text`, writes into pos[i] the distance
// back to the most recent whitespace/control character (bytes <= ' ').
// Each thread handles SeqSize consecutive characters; a shared-memory
// binary-indexed-tree-style scan (BIT) propagates run lengths across the
// threads of a block. ThreadSize, LOGT and SeqSize come from includes.h --
// layout assumptions below are inferred from this code alone, verify there.
//
// NOTE(review): every __syncthreads() in this kernel sits inside the
// `if (seqIndex < n)` branch. If any thread of a block fails that guard
// (i.e. n is not a multiple of the per-block character count), threads
// diverge at a block-wide barrier, which is undefined behavior -- confirm
// the launch guarantees full blocks, or hoist the barriers out of the guard.
__global__ void myCudaCount(const char *text, int *pos, int n){
int bid = blockIdx.x;
// blockIdx.y == 1 runs the same blocks shifted by half a block, so block
// boundaries of the y==0 pass fall inside a y==1 block.
int left = (blockIdx.y == 1)? bid * blockDim.x + ThreadSize/2: bid * blockDim.x;
int tid = threadIdx.x;
int id = left + tid;
__shared__ int BIT[ThreadSize][LOGT];
__shared__ int iBIT[ThreadSize];
int seqIndex = id * SeqSize;
int seq[SeqSize];
if (seqIndex < n){
// Transform
// seq[i] = length of the run of non-separator chars ending at position
// seqIndex+i (0 when the char itself is a separator or past the end).
int count = 1;
for (int i=0; i<SeqSize; i++, count++){
if (seqIndex + i < n){
if (text[seqIndex + i] <= ' ')
count = 0;
}
else{
count = 0;
}
seq[i] = count;
}
// iBIT: offset of the last separator inside this thread's chunk;
// BIT[tid][0]: 1 iff the whole chunk is separator-free (run continues).
iBIT[tid] = SeqSize - 1 - seq[SeqSize - 1];
BIT[tid][0] = seq[SeqSize - 1] / SeqSize;
__syncthreads();
// Build tree
// Level i accumulates how many consecutive earlier chunks (in powers of
// two) are separator-free; a 0 anywhere breaks the chain.
int before = BIT[tid][0];
for (int i=1, offset=1; i<LOGT; i++, offset <<= 1){
int tmp = tid - offset;
if (tmp >= 0){
if (before != 0 && BIT[tmp][i-1] != 0){
before = (BIT[tid][i] = before + BIT[tmp][i-1]);
}
else
before = (BIT[tid][i] = 0);
}
else{
BIT[tid][i] = before;
}
__syncthreads();
}
// Set
// Walk back through the tree to find the chunk containing the nearest
// separator before this thread's chunk, then its exact text offset.
int offset = tid - 1;
for (int i=LOGT-1; i>=0 && offset>=0; i--)
offset -= BIT[offset][i];
if (offset >= 0) offset = (left + offset) * SeqSize + iBIT[offset];
// Only threads whose output is not covered by the other y-pass write.
if (tid >= ThreadSize/2 || id < ThreadSize / 2){
for (int i=0; i<SeqSize && seqIndex+i<n; i++){
if (seq[i] == 0)
offset = seqIndex + i;
pos[seqIndex + i] = seqIndex + i - offset;
}
}
}
}
8,208 | // includes, system
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
// Intentionally empty kernel: main() launches it only to exercise the
// allocate / copy / launch / copy-back pipeline. g_idata and g_odata are
// unused; g_odata is never written, so the host reads back uninitialized
// device memory.
__global__ void
testKernel(float *g_idata, float *g_odata)
{
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Round-trips a 32-float buffer through the device via the (empty)
// testKernel, then tears everything down. argc/argv are unused.
int
main(int argc, char **argv)
{
    const unsigned int num_threads = 32;
    const unsigned int mem_size = sizeof(float) * num_threads;

    // host input, filled with 0..31
    float *h_idata = (float *) malloc(mem_size);
    for (unsigned int i = 0; i < num_threads; ++i)
    {
        h_idata[i] = (float) i;
    }

    // device input and output buffers
    float *d_idata;
    float *d_odata;
    cudaMalloc((void **) &d_idata, mem_size);
    cudaMalloc((void **) &d_odata, mem_size);
    cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice);

    // one block of num_threads threads, mem_size bytes of dynamic shared mem
    dim3 grid(1, 1, 1);
    dim3 threads(num_threads, 1, 1);
    testKernel<<< grid, threads, mem_size >>>(d_idata, d_odata);

    // copy the (untouched) result back to the host
    float *h_odata = (float *) malloc(mem_size);
    cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost);

    // cleanup
    free(h_idata);
    free(h_odata);
    cudaFree(d_idata);
    cudaFree(d_odata);
    exit(EXIT_SUCCESS);
}
|
8,209 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error: %s: %d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
extern "C" __global__
// Per-thread dot product of 20-element vectors. Thread t of block b reads
// its 20 coefficients at stride 32 starting at A[20*32*b + t] (and the same
// positions in B), accumulates a[i]*b[i] in order, and writes the scalar to
// out[b*32 + t]. Assumes blockDim.x == 32.
void dotp(double* A, double* B, double* out){
    const int BLOCK_SIZE = 32;
    int base = 20 * BLOCK_SIZE * blockIdx.x + threadIdx.x;

    // Stage the strided coefficients into registers first, as the original
    // did, then reduce.
    double a[20], b[20];
    #pragma unroll
    for (int k = 0; k < 20; ++k) {
        a[k] = A[base + BLOCK_SIZE * k];
    }
    #pragma unroll
    for (int k = 0; k < 20; ++k) {
        b[k] = B[base + BLOCK_SIZE * k];
    }

    double acc = 0;
    for (int k = 0; k < 20; ++k) {
        acc += a[k] * b[k];
    }

    out[blockIdx.x * BLOCK_SIZE + threadIdx.x] = acc;
}
// Allocates N^3 dot-product problems of length 20, runs the dotp kernel, and
// prints the elapsed time and achieved flop rate (20 mul + 19 add per output).
extern "C" void dot(int size){
    int N = size;
    int T = 32;   // threads per block; dotp assumes exactly 32
    double * A, *B, *out;
    A = (double*)malloc( N*N*N*20*sizeof(double));
    B = (double*)malloc( N*N*N*20*sizeof(double) );
    out = (double*)malloc( N*N*N*sizeof(double));
    // initialize
    for(int i = 0; i < N*N*N; ++i){
        for(int j = 0; j < 20; ++j){
            A[i*20 + j] = 100*i+j;
            B[i*20 + j] = 1000*i+j;
        }
        out[i] = 0.0;
    }
    // NOTE(review): the host fills A/B in AoS order (A[i*20+j]) while dotp
    // reads them with a stride-32 pattern -- confirm the intended layout.
    double *dA, *dB, *dout;
    cudaMalloc( (void**)&dA, N*N*N*20*sizeof(double));
    cudaMalloc( (void**)&dB, N*N*N*20*sizeof(double));
    cudaMalloc( (void**)&dout, N*N*N*sizeof(double));
    // BUG FIX: cudaMemcpy takes (dst, src, ...). The previous code passed the
    // host pointer as dst on these uploads, so the kernel ran on
    // uninitialized device memory.
    cudaMemcpy(dA, A, N*N*N*20*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, N*N*N*20*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dout, out, N*N*N*sizeof(double), cudaMemcpyHostToDevice);
    dim3 grid(N*N*N/T);
    dim3 block(T);
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    dotp<<<grid,block>>>(dA,dB,dout);
    CHECK(cudaDeviceSynchronize());
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float time;
    cudaEventElapsedTime(&time, start, stop);
    double flops = 39*N*N*N / (time * 1e-3); // Flop/sec
    printf("%d^3: time %f[ms], flops %f [GFlops]\n", N, time, flops * 1e-9);
    // Only the result needs to come back; A and B are unchanged on device.
    cudaMemcpy(out, dout, N*N*N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);   // events were previously leaked
    cudaEventDestroy(stop);
    free(out);
    free(A);
    free(B);
    cudaFree(dout);
    cudaFree(dA);
    cudaFree(dB);
}
|
8,210 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
// Function that catches errors
// Function that catches the error
// Aborts with a file/line message when a CUDA call returned an error.
// Wrapped by the testCUDA(error) macro defined below so __FILE__/__LINE__
// point at the call site rather than at this function.
void testCUDA(cudaError_t error, const char *file, int line) {
if (error != cudaSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit(EXIT_FAILURE);
}
}
// Has to be defined in the compilation in order to get the correct value of the
// macros __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Global variables
#define d 10
#define N 10
#define P 55
#define NUMBER_BLOCKS 512
#define THREADS_PER_BLOCKS 4*d
// Function that performs the product of Jacobi rotations
/* This function will be performed N times by N different blocks */
// Applies, for each of the N systems (one per block), the P 2x2 Jacobi
// rotations stored in J to the d x d matrix A of that system, writing the
// rotated rows pos_i[p]/pos_j[p] into out. Threads come in groups of four
// per matrix column (blockDim.x == 4*d); threads 0/1 of a group build the
// new row pos_i entry, threads 2/3 the new row pos_j entry.
__global__ void Compute_all_rotations(float *J, float *A, float *out, const int *pos_i, const int *pos_j){
__shared__ float temp[4 * d] ; // variable that is to be shared by the threads in the block
// BUG FIX: get_data stores the rotations of system n at offset 4*P*n in J,
// so the per-block base must use blockIdx.x. The previous blockDim.x made
// every block read the same region of J -- and one beyond the end of the
// array (blockDim.x = 4*d gives 4*d*4*P, past the 4*P*N floats loaded).
int block_j = blockIdx.x * 4 * P ;
int idx_J;
int idx_A;
int idx_out;
for (int p=0 ; p<P ; p++) {
// index = threadIdx.x + blockIdx.x*blockDim.x ;
// Perform multiplications: each thread multiplies one rotation coefficient
// with one element of row pos_i or pos_j of this block's A.
if (threadIdx.x % 4 == 0 ) {
idx_J = 0 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 1 ) {
idx_J = 1 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2 ) {
idx_J = 2 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 3 ) {
idx_J = 3 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
temp[threadIdx.x] = J[idx_J] * A[idx_A] ;
__syncthreads(); // synchronize threads
// Perform additions: even threads sum their product with their odd
// neighbor's and store the rotated element.
if (threadIdx.x % 2 == 0){
if (threadIdx.x % 4 == 0){
idx_out = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2){
idx_out = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
out[idx_out] = temp[threadIdx.x] + temp[threadIdx.x + 1] ;
}
__syncthreads(); // synchronize threads
}
}
// The folowing function reads the data stored in csv files and stores it in arrays
// Reads ';'- or newline-separated ASCII floats from fname and stores them
// into array starting at index pos.
// FIXES: `count` was uninitialized and never incremented (every value was
// written to the same garbage offset); the token buffer was only
// sizeof(float) == 4 bytes with no overflow check and no guaranteed NUL for
// atof(); `c` was a char compared against EOF (UB on unsigned-char
// platforms); fopen() failure was not checked; a leftover debug
// printf("TEST\n") was removed.
void read_file(const char * fname, float *array, int pos){
    FILE *file;
    char tampon[64];   // large enough for one ASCII float token + NUL
    int actuel = 0;    // number of characters accumulated in tampon
    int c;             // int so EOF is representable
    int count = 0;     // index of the next value to store
    file = fopen (fname, "r");
    if (file == NULL) {
        printf("Cannot open %s\n", fname);
        return;
    }
    memset(tampon, 0, sizeof tampon);
    while ((c = fgetc(file)) != EOF) {
        if (c == ';' || c == '\n') {
            array[pos + count] = atof(tampon);
            count++;           // advance to the next output slot
            actuel = 0;
            memset(tampon, 0, sizeof tampon);
        } else if (actuel < (int)sizeof(tampon) - 1) {
            tampon[actuel++] = (char)c;  // keep a NUL terminator free
        }
    }
    fclose (file);
}
// Loads the input data from disk: for each of the N matrices n, reads the
// P rotation files files/<n>/J_<p>.txt into J (4 floats per rotation) and
// the matrix file files/<n>/A.txt into A (d*d floats per matrix).
void get_data(float *J, float *A){
    char fname[100] = {0};
    for (int n = 0; n < N; n++){
        for (int p = 0; p < P; p++){
            snprintf(fname, sizeof fname, "files/%i/J_%i.txt", n, p);
            read_file(fname, J, 4 * (P * n + p));
        }
        snprintf(fname, sizeof fname, "files/%i/A.txt", n);
        read_file(fname, A, n * d * d);
    }
}
// Writes each d x d result matrix to files/<n>/out.txt. Entries are
// followed by "; ", except the last entry of each row which is followed
// by a newline.
void write_result(float* out){
    char fname[100] = {0};
    for (int n = 0; n < N; n++) {
        snprintf(fname, 100, "files/%i/out.txt", n);
        FILE *file = fopen(fname, "w");
        for (int i = 0; i < d; i++) {
            for (int j = 0; j < d; j++) {
                const char* sep = (j == d - 1) ? "\n" : "; ";
                fprintf(file, "%f %s", out[n*d*d + i*d + j], sep);
            }
        }
        fclose(file);
    }
}
/* Fills pos_i/pos_j with the (row, column) pairs of the P Jacobi rotations,
 * skipping diagonal entries by bumping `shift` whenever a column hits d-1.
 * FIX: floor((i+shift)/d) round-tripped an exact integer division through
 * double for no reason — plain integer division gives the same value. */
void positions(int* pos_i, int* pos_j){
    int shift = 0;
    for (int i = 0; i < P; i++){
        pos_i[i] = (i + shift) / d;   /* was floor((i+shift)/d): identical */
        pos_j[i] = (i + shift) % d;
        if ((i + shift) % d == d - 1){
            shift++;   /* skip the diagonal element of the next row */
        }
    }
}
// Driver: loads the rotation/matrix data, runs Compute_all_rotations on the
// GPU (one block per matrix, 4*d threads), times it with CUDA events and
// writes the results back to disk.
// FIXES vs. original: device buffers were declared as *host stack arrays*
// and then passed to cudaMalloc; every cudaMemcpy/cudaMalloc size was
// truncated to sizeof(float) (4 bytes); pos_i and pos_j were swapped in the
// host-to-device copies; the kernel was launched <<<1,1>>> instead of one
// block per matrix; cudaFree() was called on cudaEvent_t handles.
int main(){
    // Print some properties of the available GPUs
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);
    for (int i = 0; i < count; i++) {
        cudaGetDeviceProperties(&prop, i);
        printf("Taille totale de la mémoire globale %ld\n", prop.totalGlobalMem);
    }
    // Host buffers
    float J[P * 4 * d * N];
    float A[d * d * N];
    float out[d * d * N];
    get_data(J, A);
    // Device buffers (plain pointers, full byte counts)
    float *d_J, *d_A, *d_out;
    size_t sizeJ = sizeof(float) * P * 4 * d * N;
    size_t sizeA = sizeof(float) * d * d * N;
    testCUDA(cudaMalloc((void **)&d_J, sizeJ));
    testCUDA(cudaMalloc((void **)&d_A, sizeA));
    testCUDA(cudaMalloc((void **)&d_out, sizeA));
    testCUDA(cudaMemcpy(d_A, A, sizeA, cudaMemcpyHostToDevice));
    testCUDA(cudaMemcpy(d_J, J, sizeJ, cudaMemcpyHostToDevice));
    // Rotation index pairs
    int pos_i[P];
    int pos_j[P];
    int *d_pos_i, *d_pos_j;
    size_t sizeP = sizeof(int) * P;
    testCUDA(cudaMalloc((void **)&d_pos_i, sizeP));
    testCUDA(cudaMalloc((void **)&d_pos_j, sizeP));
    positions(pos_i, pos_j);
    testCUDA(cudaMemcpy(d_pos_i, pos_i, sizeP, cudaMemcpyHostToDevice));
    testCUDA(cudaMemcpy(d_pos_j, pos_j, sizeP, cudaMemcpyHostToDevice));
    // Timer definition and start
    float TimerV;
    cudaEvent_t start, stop;
    testCUDA(cudaEventCreate(&start));
    testCUDA(cudaEventCreate(&stop));
    testCUDA(cudaEventRecord(start, 0));
    // One block per matrix, 4*d threads per block as the kernel expects
    Compute_all_rotations<<<N, THREADS_PER_BLOCKS>>>(d_J, d_A, d_out, d_pos_i, d_pos_j);
    testCUDA(cudaGetLastError());  // catch launch-configuration errors
    // Stopping timer
    testCUDA(cudaEventRecord(stop, 0));
    testCUDA(cudaEventSynchronize(stop));
    testCUDA(cudaEventElapsedTime(&TimerV, start, stop));
    printf("Execution time: %f ms\n", TimerV);
    // Copying and saving result (full array, not 4 bytes)
    testCUDA(cudaMemcpy(out, d_out, sizeA, cudaMemcpyDeviceToHost));
    write_result(out);
    // Cleanup: buffers are freed, events are destroyed
    testCUDA(cudaFree(d_A));
    testCUDA(cudaFree(d_J));
    testCUDA(cudaFree(d_out));
    testCUDA(cudaFree(d_pos_i));
    testCUDA(cudaFree(d_pos_j));
    testCUDA(cudaEventDestroy(start));
    testCUDA(cudaEventDestroy(stop));
    return 0;
}
8,211 | #include "includes.h"
// Integrand of the classic pi-by-quadrature example: f(x) = 4 / (1 + x^2).
__device__ float f(float x)
{
    float denom = 1.f + x * x;
    return 4.f / denom;
}
// Tiled matrix multiply: matrixC = matrixA (rowA x columnA) * matrixB
// (rowB x columnB), all row-major. Each thread computes one element of C;
// TILE_SIZE x TILE_SIZE tiles of A and B are staged through shared memory.
// Requires blockDim == (TILE_SIZE, TILE_SIZE); assumes columnA == rowB.
__global__ void multMatrixGPU(const float *matrixA, const size_t rowA, const size_t columnA, const float *matrixB, const size_t rowB, const size_t columnB, float *matrixC)
{
__shared__ float ds_A[TILE_SIZE][TILE_SIZE];
__shared__ float ds_B[TILE_SIZE][TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
float cValue = 0.0; // accumulator for C[row][col]
// March over the shared inner dimension one TILE_SIZE-wide strip at a time
// (ceil(columnA / TILE_SIZE) iterations).
for (int t = 0; t < (columnA - 1) / TILE_SIZE + 1; t++)
{
// Stage this thread's element of the A tile; zero-pad past the edges so
// the inner product below needs no bounds checks.
if (t * TILE_SIZE + tx < columnA && row < rowA)
{
ds_A[ty][tx] = matrixA[row * columnA + t * TILE_SIZE + tx];
} else {
ds_A[ty][tx] = 0.0;
}
// Stage this thread's element of the B tile, likewise zero-padded.
if (t * TILE_SIZE + ty < rowB && col < columnB)
{
ds_B[ty][tx] = matrixB[(t * TILE_SIZE + ty) * columnB + col];
} else {
ds_B[ty][tx] = 0.0;
}
__syncthreads(); // both tiles fully loaded before use
for (int i = 0; i < TILE_SIZE; i++) {
cValue += ds_A[ty][i] * ds_B[i][tx];
}
__syncthreads(); // done reading tiles before they are overwritten
}
// Only threads mapped to a real element of C write their result.
if (row < rowA && col < columnB) {
matrixC[row * columnB + col] = cValue;
}
}
8,212 | #include <cuda_runtime.h>
#include <stdio.h>
// Prints this thread's full index/dimension context (threadIdx, blockIdx,
// blockDim, gridDim). Debug helper only: device-side printf is serialized
// and slow, and its output is flushed at the next synchronizing call.
__global__ void checkIndx(void)
{
printf("threadIdx:(%d, %d, %d) "
"blockIdx:(%d, %d, %d) "
"blockDim:(%d, %d, %d) "
"gridDim:(%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, gridDim.z);
}
// Launches the index-printing kernel under several 1-D configurations
// (i = 1, 2, 4 divide nElem evenly; i = 3 is skipped by the filter).
// FIX: without a synchronization after each launch, device printf output
// could interleave between launches or be lost at program teardown; the
// stale "2 blocks / 3 threads" comment was also corrected.
int main()
{
    int nElem = 64;
    for (int i = 1; i < 5; ++i)
    {
        if (i == 1 || i % 2 == 0) {
            dim3 block (nElem / i);
            dim3 grid ((nElem + block.x - 1) / block.x);  // ceil-div
            printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
            printf("block.x %d, block.y %d, block.z %d\n", block.x, block.y, block.z);
            checkIndx <<<grid, block>>>();
            // Flush this launch's device printf output before the next one.
            cudaDeviceSynchronize();
        }
    }
    cudaDeviceReset();
    return 0;
}
|
8,213 | #include "includes.h"
#define min(X,Y) ((X) < (Y) ? (X) : (Y))
// Finite-difference update on an nx x ny grid stored row-major
// (row r = i / nx, column c = i % nx): interior points get the average of
// the two second-difference terms of d_z; boundary points are set to zero.
// NOTE(review): dx2inv scales the +-nx (row-direction) neighbours and
// dy2inv the +-1 (column-direction) neighbours — confirm this pairing
// matches the caller's definition of dx/dy for this layout.
__global__ void accel_update(int nx, int ny, double dx2inv, double dy2inv, double* d_z, double* d_a) {
int i = blockDim.x * blockIdx.x + threadIdx.x; // flat global index
double ax, ay;
int r = i / nx; // row index
int c = i % nx; // column index
if(i < nx*ny) {
if(r<ny-1 && r>0 && c<nx-1 && c>0){
ax = (d_z[i+nx]+d_z[i-nx]-2.0*d_z[i])*dx2inv;
ay = (d_z[i+1]+d_z[i-1]-2.0*d_z[i])*dy2inv;
d_a[i] = (ax+ay)/2;
}
else
d_a[i] = 0.0; // boundary condition
}
}
8,214 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
static const int WORK_SIZE = 256;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
/**
 * Per-block sum reduction: block b writes the sum of its slice of `in`
 * into res[b]. Assumes blockDim.x is a power of two and <= 1024.
 * FIX: threads with i >= n previously left their shared-memory slot
 * uninitialized, so partial blocks folded garbage into the sum; they now
 * contribute an explicit 0.
 */
__global__ void vecSum_GPU3(const double* in, double* res, const unsigned long n)
{
    __shared__ double tmp[1024];
    unsigned long i = blockIdx.x*blockDim.x+threadIdx.x;
    // Every thread must initialize its slot, in range or not.
    tmp[threadIdx.x] = (i < n) ? in[i] : 0.0;
    __syncthreads();
    // Tree reduction in shared memory: halve the active range each pass.
    for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
    {
        if(threadIdx.x < s)
        {
            tmp[threadIdx.x] += tmp[threadIdx.x + s];
        }
        __syncthreads();
    }
    if(threadIdx.x == 0) res[blockIdx.x] = tmp[0];
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
/**
 * Sums a vector of argv[1] ones on the GPU and prints the per-block
 * partial sums. FIXES: `long` values were printed with %d (undefined
 * behaviour on LP64); the "Block Filling Vector" line printed vec_size
 * instead of vec_size_full; host and device buffers were never freed;
 * the error message contained an offensive slur.
 */
int main(int argc, char** argv) {
    if(argc<2)
    {
        printf("Not enough arguments, please specify a size!\n");
        return 1;
    }
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    int THREADS_PER_BLOCK = 0;
    // Report each device and take the last one's max block size.
    for (int i = 0; i < nDevices; i++)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf(" Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n",
               prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n",
               prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" Max Threads Per Block: %d\n\n", prop.maxThreadsPerBlock);
        THREADS_PER_BLOCK = prop.maxThreadsPerBlock;
    }
    long vec_size = atol(argv[1]);
    printf("Size of Vector: %ld\n", vec_size);
    int blocks = ceil((float)vec_size/THREADS_PER_BLOCK);
    printf("Blocks: %d\n", blocks);
    // Pad to a whole number of blocks; the tail is filled with zeros.
    long vec_size_full = (long)THREADS_PER_BLOCK * blocks;
    printf("Size of Block Filling Vector: %ld\n", vec_size_full);
    double* vec = (double*)malloc(sizeof(double) * vec_size_full);
    double* res = (double*)malloc(sizeof(double) * vec_size_full);
    for(long i = 0; i < vec_size_full; i++)
    {
        vec[i] = (i < vec_size) ? 1.0 : 0.0;
        res[i] = 0;
    }
    printf("\n");
    double* d_vec;
    double* d_res;
    cudaMalloc((double **) &d_vec, vec_size_full * sizeof(double));
    cudaMalloc((double **) &d_res, vec_size_full * sizeof(double));
    cudaMemcpy(d_vec, vec, vec_size_full*sizeof(double), cudaMemcpyHostToDevice);
    vecSum_GPU3<<<blocks,THREADS_PER_BLOCK>>>(d_vec, d_res, vec_size);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(res, d_res, vec_size_full*sizeof(double), cudaMemcpyDeviceToHost);
    for(int i = 0; i < blocks; i++)
    {
        printf("%f\n", res[i]);
    }
    // Release device and host buffers (they leaked before).
    cudaFree(d_vec);
    cudaFree(d_res);
    free(vec);
    free(res);
    CUDA_CHECK_RETURN(cudaDeviceReset());
    return 0;
}
|
8,215 | #include <complex>
#include <iostream>
#include <sys/time.h>
#include <thrust/complex.h>
using namespace std;
// Computes one row (r = threadIdx.x) of an ASCII escape-time fractal:
// '#' where the iteration ran the full max_n steps without escaping,
// '.' otherwise. Launch with max_row threads in a single block.
// NOTE(review): the escape test uses only z.real() < 2 rather than
// abs(z) < 2, and the constant c is added after squaring a
// default-initialized z — confirm this is the intended variant of the
// Mandelbrot recurrence.
__global__ void make_fractal(char *mat, int max_n, int max_row, int max_column){
int r = threadIdx.x; // one thread per image row
using namespace std;
for(int c = 0; c < max_column; ++c){
thrust::complex<float> z;
int n = 0;
// Map pixel (r, c) to the complex plane: real in [-1.5, 0.5),
// imaginary in [-1, 1).
while(z.real() < 2 && ++n < max_n)
z = pow(z, 2) + decltype(z)(
(float)c * 2 / max_column - 1.5,
(float)r * 2 / max_row - 1
);
mat[r*max_column + c]=(n == max_n ? '#' : '.');
}
}
// Renders the fractal with one GPU thread per row, times the kernel with
// gettimeofday, and optionally prints the ASCII image.
// FIXES: the usage message omitted the 4th required argument (print flag);
// the managed buffer was never released.
int main(int argc, char *argv[]){
    int max_row, max_column, max_n, print;
    if (argc != 5){
        std::cout << "Faltam argumentos, devem ser no formato: ./executavel max_row max_column max_n print" << std::endl;
        exit(-1);
    }
    max_row = atoi(argv[1]);
    max_column = atoi(argv[2]);
    max_n = atoi(argv[3]);
    print = atoi(argv[4]);
    char *mat;
    // Unified memory so both the kernel and the host print loop can use it.
    cudaMallocManaged(&mat, max_row*max_column*sizeof(unsigned char));
    timeval start, end;
    gettimeofday(&start, NULL);
    // One block, one thread per row; each thread fills a full row.
    make_fractal<<<1, max_row>>>(mat, max_n, max_row, max_column);
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);
    double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
    std::cout << "compute time: " << runtime << " s\n";
    if(print){
        for(int r = 0; r < max_row; ++r){
            for(int c = 0; c < max_column; ++c)
                std::cout << mat[r*max_column + c];
            std::cout << '\n';
        }
    }
    // Release the managed allocation (leaked in the original).
    cudaFree(mat);
    return 0;
}
|
8,216 | #ifdef _GLIBCXX_USE_INT128
#undef _GLIBCXX_USE_INT128
#endif
#ifdef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_ATOMIC_BUILTINS
#endif
#include <thrust/transform_reduce.h>
#include <thrust/device_vector.h>
#include <thrust/pair.h>
#include <stdlib.h> // for rand()
// This example shows how to compute a bounding box
// for a set of points in two dimensions.
// Simple 2-D point usable on host and device. The default constructor
// deliberately leaves x and y uninitialized; both constructors are
// __host__ __device__ so the type works inside thrust device containers.
struct point2d
{
float x, y;
__host__ __device__
point2d() {}
__host__ __device__
point2d(float _x, float _y) : x(_x), y(_y) {}
};
// bounding box type
typedef thrust::pair<point2d, point2d> bbox;
// reduce a pair of bounding boxes (a,b) to a bounding box containing a and b
// reduce a pair of bounding boxes (a,b) to a bounding box containing a and b
// A bbox is a (lower-left, upper-right) pair; taking component-wise
// min for the first corner and max for the second yields the smallest
// box covering both inputs. Associative and commutative, so it is a
// valid thrust reduction operator.
struct bbox_reduction : public thrust::binary_function<bbox,bbox,bbox>
{
__host__ __device__
bbox operator()(bbox a, bbox b)
{
// lower left corner
point2d ll(min(a.first.x, b.first.x), min(a.first.y, b.first.y));
// upper right corner
point2d ur(max(a.second.x, b.second.x), max(a.second.y, b.second.y));
return bbox(ll, ur);
}
};
// convert a point to a bbox containing that point, (point) -> (point, point)
// Lift a single point to the degenerate bounding box that contains exactly
// that point: (point, point).
struct bbox_transformation : public thrust::unary_function<point2d,bbox>
{
    __host__ __device__
    bbox operator()(point2d point)
    {
        bbox single(point, point);
        return single;
    }
};
// Computes the bounding box of N random points in the unit square with a
// single transform_reduce pass: each point is lifted to a degenerate box,
// then boxes are merged pairwise.
int main(void)
{
const size_t N = 40;
// allocate storage for points
thrust::device_vector<point2d> points(N);
// generate some random points in the unit square
// (rand() / (RAND_MAX + 1.0) is uniform in [0, 1); each element
// assignment from host code is a small host-to-device transfer,
// acceptable for N = 40)
for(size_t i = 0; i < N; i++)
points[i] = point2d( ((float) rand() / (RAND_MAX + 1.0)), ((float) rand() / (RAND_MAX + 1.0)) );
// initial bounding box contains first point
bbox init = bbox(points[0], points[0]);
// transformation operation
bbox_transformation unary_op;
// binary reduction operation
bbox_reduction binary_op;
// compute the bounding box for the point set
bbox result = thrust::transform_reduce(points.begin(), points.end(), unary_op, init, binary_op);
// print output
std::cout << "bounding box ";
std::cout << "(" << result.first.x << "," << result.first.y << ") ";
std::cout << "(" << result.second.x << "," << result.second.y << ")" << std::endl;
std::cout << "TEST PASSED\n";
return 0;
}
|
8,217 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#define NUM_NODES 5
using namespace std;
// One sweep of frontier-based BFS: every frontier vertex (Fa set, Xa not
// yet visited) is marked visited and relaxes its neighbours, extending the
// frontier and clearing *done so the host relaunches until a fixed point.
// FIXES: the bounds test used `id > NUM_NODES` (letting id == NUM_NODES
// through) and, worse, did not stop out-of-range threads from then
// reading Fa[id] / Va[id+1] out of bounds — they now return immediately.
// The __syncthreads() that sat inside the divergent frontier branch
// (undefined behaviour) was removed; no thread reads data another thread
// writes within a single sweep, so no barrier is needed here.
__global__ void CUDA_BFS_KERNEL(int *Va, int *Ea, bool *Fa, bool *Xa, int *Ca, bool *done)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= NUM_NODES)
        return;
    if (Fa[id] == true && Xa[id] == false)
    {
        Fa[id] = false;
        Xa[id] = true;
        int start = Va[id];     // CSR range of id's neighbours
        int end = Va[id + 1];
        for (int i = start; i < end; i++)
        {
            int nid = Ea[i];
            if (Xa[nid] == false)
            {
                Ca[nid] = Ca[id] + 1;  // one level deeper than id
                Fa[nid] = true;        // join the next frontier
                *done = false;         // another sweep is required
            }
        }
    }
}
// The BFS frontier corresponds to all the nodes being processed at the current level.
// Builds a random undirected graph on NUM_NODES vertices, flattens it to
// CSR arrays (v, e), runs the BFS kernel from vertex 0 until no distance
// changes, and prints the per-vertex costs.
// FIXES: adjacency rows were allocated with `new int[NUM_NODES]` but never
// zeroed, so "no edge" cells held garbage and the CSR flattening could
// overrun e; rows are now value-initialized. All host and device
// allocations (previously leaked) are released before exit.
int main(int argc, char** argv)
{
    int** graph = new int* [NUM_NODES];
    int edges = 0;
    for (int i = 0; i < NUM_NODES; i++)
    {
        graph[i] = new int[NUM_NODES]();  // () => zero-initialized row
    }
    // Random symmetric adjacency matrix (each pair joined with prob. 1/2).
    for (int i = 0; i < NUM_NODES; i++)
    {
        for (int j = i+1; j < NUM_NODES; j++)
        {
            if (rand() % 2 == 1)
            {
                edges += 1;
                graph[i][j] = 1;
                graph[j][i] = 1;
            }
        }
    }
    // Flatten to CSR: v[i] = start of i's neighbours in e; v[NUM_NODES] = 2*edges.
    int* v = new int[NUM_NODES+1];
    int* e = new int[2*edges];
    int x = 0;
    for (int i = 0; i < NUM_NODES; i++)
    {
        v[i] = x;
        for (int j = 0; j < NUM_NODES; j++)
        {
            if (graph[i][j] != 0)
            {
                e[x] = j;
                x += 1;
            }
        }
    }
    v[NUM_NODES] = x;
    bool frontier[NUM_NODES] = { false };
    bool visited[NUM_NODES] = { false };
    int cost[NUM_NODES] = { 0 };
    int source = 0;
    frontier[source] = true;
    // Device copies of the CSR arrays and the BFS state.
    int* Va;
    cudaMalloc((void**)&Va, sizeof(int)*(NUM_NODES+1));
    cudaMemcpy(Va, v, sizeof(int)*(NUM_NODES+1), cudaMemcpyHostToDevice);
    int* Ea;
    cudaMalloc((void**)&Ea, sizeof(int)*(2*edges));
    cudaMemcpy(Ea, e, sizeof(int)*(2*edges), cudaMemcpyHostToDevice);
    bool* Fa;
    cudaMalloc((void**)&Fa, sizeof(bool)*NUM_NODES);
    cudaMemcpy(Fa, frontier, sizeof(bool)*NUM_NODES, cudaMemcpyHostToDevice);
    bool* Xa;
    cudaMalloc((void**)&Xa, sizeof(bool)*NUM_NODES);
    cudaMemcpy(Xa, visited, sizeof(bool)*NUM_NODES, cudaMemcpyHostToDevice);
    int* Ca;
    cudaMalloc((void**)&Ca, sizeof(int)*NUM_NODES);
    cudaMemcpy(Ca, cost, sizeof(int)*NUM_NODES, cudaMemcpyHostToDevice);
    int num_blks = 1;
    int threads = 5;
    bool done;
    bool* d_done;
    cudaMalloc((void**)&d_done, sizeof(bool));
    // Relaunch the sweep until a pass completes with no updates.
    do {
        done = true;
        cudaMemcpy(d_done, &done, sizeof(bool), cudaMemcpyHostToDevice);
        CUDA_BFS_KERNEL <<<num_blks, threads >>>(Va, Ea, Fa, Xa, Ca, d_done);
        cudaMemcpy(&done, d_done , sizeof(bool), cudaMemcpyDeviceToHost);
    } while (!done);
    cudaMemcpy(cost, Ca, sizeof(int)*NUM_NODES, cudaMemcpyDeviceToHost);
    cout<<"Cost: "<<endl;
    for (int i = 0; i < NUM_NODES; i++)
    {
        cout<<cost[i]<<" ";
    }
    cout<<endl;
    // Release everything (all of this leaked in the original).
    for (int i = 0; i < NUM_NODES; i++) delete[] graph[i];
    delete[] graph;
    delete[] v;
    delete[] e;
    cudaFree(Va);
    cudaFree(Ea);
    cudaFree(Fa);
    cudaFree(Xa);
    cudaFree(Ca);
    cudaFree(d_done);
    return 0;
}
|
8,218 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/extrema.h>
// Deterministic pseudo-random integer in [0, 9999]. The engine and the
// distribution are function-static, so successive calls walk the same
// reproducible sequence.
int my_rand(void)
{
    static thrust::default_random_engine engine;
    static thrust::uniform_int_distribution<int> uniform(0, 9999);
    return uniform(engine);
}
// Fills a vector with deterministic pseudo-random values, finds the maximum
// on the device with thrust::max_element, and prints its index and value.
int main(void)
{
    // Random data generated on the host.
    thrust::host_vector<int> h_vec(100);
    thrust::generate(h_vec.begin(), h_vec.end(), my_rand);
    // Move it to the device.
    thrust::device_vector<int> d_vec = h_vec;
    // Locate the largest element and where it lives.
    thrust::device_vector<int>::iterator iter =
        thrust::max_element(d_vec.begin(), d_vec.end());
    size_t idx = (size_t)(iter - d_vec.begin());
    int max = *iter;
    // Report the result.
    std::cout << "idx: " << idx << " max: " << max << std::endl;
    return 0;
}
|
8,219 | #ifndef GLOBAL_CUBIC_CU
#define GLOBAL_CUBIC_CU
#include <thrust/complex.h>
using namespace thrust;
// For each element i < n, solves the monic cubic (coefficients taken as
// x^3 + b x^2 + c x + d with b = B[i], c = C[i], d = D[i]) via Cardano's
// formula in complex arithmetic, and stores the largest essentially-real
// root in x[i]. Roots with |Im| > 1e-8 have their real part zeroed before
// the maximum is taken.
// FIX: the second Cardano term was a complex local named `n`, shadowing the
// kernel's element-count parameter `n` for the rest of the body — renamed
// to `q`; behaviour is unchanged.
extern "C" __global__ void cubic(const double* B,
                                 const double* C,
                                 const double* D,
                                 long n,
                                 double* x)
{
    long i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        double b = B[i], c = C[i], d = D[i];
        complex<double> delta, u, v, m, q, w, r0, r1, r2;
        u = (9 * b * c - 27 * d - 2 * b * b * b) / 54;
        delta = 3 * (4 * pow(c, 3) - pow(b, 2) * pow(c, 2) - 18 * b * c * d + 27 * pow(d, 2) + 4 * pow(b, 3) * d);
        v = sqrt(delta) / 18;
        // Pick the numerically larger branch for the principal cube root.
        m = abs(u + v) >= abs(u - v) ? pow(u + v, 1. / 3) : pow(u - v, 1. / 3);
        // Companion term; guarded against m ~ 0 to avoid division blow-up.
        q = abs(m) > 1e-8 ? (pow(b, 2) - 3 * c) / (m * 9) : 0;
        w.real(-0.5); w.imag(0.5 * sqrt(3.0)); // primitive cube root of unity
        r0 = m + q - b / 3;
        r1 = w * m + w * w * q - b / 3;
        r2 = w * w * m + w * q - b / 3;
        // Discard complex roots by zeroing their real part.
        if (abs(r0.imag()) > 1e-8) r0.real(0);
        if (abs(r1.imag()) > 1e-8) r1.real(0);
        if (abs(r2.imag()) > 1e-8) r2.real(0);
        x[i] = fmax(fmax(r0.real(), r1.real()), r2.real());
    }
}
#endif |
8,220 | #include "final.cu"
int main(){
    // Smoke test for final.cu: p-value for (5, alpha = 0.05, 3).
    const float p = hypothesis_test(5, 0.05, 3);
    printf("%.4f \n", p);
    return 0;
}
|
8,221 | extern "C"
{
// Sequentially sums the n entries of x into y[0]; a serial GPU baseline.
// Intended for a single-thread launch (<<<1,1>>>): there is no thread
// indexing, so with more threads every thread would run the whole loop and
// race on y[0].
// NOTE(review): the running total is accumulated directly in y[0], so if y
// aliases x the partial sums feed back into the input — callers must pass
// distinct buffers.
__global__ void serialsum(const int n, const double *x, double *y)
{
y[0]=x[0];
for (int i = 1; i<n; i++)
{
y[0]+=x[i];
}
}
} |
8,222 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
// Standard normal density: phi(x) = exp(-x^2/2) / sqrt(2*pi).
// FIX: pow() and sqrt() silently promoted the whole computation to double
// inside a float device function; use single-precision operations
// (x*x, expf, sqrtf) throughout.
__device__ float normal_pdf(float x){
    return expf(-0.5f * x * x) / sqrtf(2.0f * 3.141592653f);
}// normal_pdf
// Density of the Exponential(lambda) distribution at x:
// f(x) = lambda * e^(-lambda * x).
__device__ float exp_pdf(float x, float lambda){
    float density = lambda * expf(-lambda * x);
    return density;
}// exp_pdf
__constant__ int seed_a = 1234, seed_b = 1423, seed_c = 1842;
// Hit-or-miss Monte Carlo for the standard-normal tail: thread idx draws
// one point uniformly in [quantile, sigma_out] x [0, 1] and records 1 in
// vals[idx] iff the point falls under the N(0,1) density curve.
// The host is expected to average vals over the n draws; vals_length is
// unused in the body.
__global__ void
mc_integration_normal_kernel(int *vals, int vals_length, int n,
float quantile, float sigma_out)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Declaration of other needed variables
float x_uni = 0, y_uni = 0;
// Only evaluate if thread is within the range of values we need
if(idx < n){
// Setup the RNG (deterministic per-thread seed from the constants above):
curandState rng;
curand_init(seed_a + idx * seed_b, seed_c, 0, &rng);
// Provides x uniform on quantile to sigma_out
x_uni = (sigma_out - quantile) * curand_uniform(&rng) + quantile;
// Provides y uniform on 0 to 1
y_uni = curand_uniform(&rng);
if(y_uni < normal_pdf(x_uni)){
vals[idx] = 1;
}else{
vals[idx] = 0;
}//ifelse
}// if(idx < n)
return;
} // mc_integration_normal_kernel
// Importance-sampling estimate of the normal tail beyond `quantile`:
// thread idx draws x ~ Exponential(lambda) by inverse transform and stores
// the likelihood ratio phi(x)/exp_pdf(x) when x clears the quantile,
// 0 otherwise. The host averages vals; vals_length is unused in the body.
__global__ void
mc_integration_normal_vegas_kernel(float *vals, int vals_length, int n,
float quantile, float lambda)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Declaration of other needed variables
float x_exp = 0, testval = 0;
// Only evaluate if thread is within the range of values we need
if(idx < n){
// Setup the RNG (deterministic per-thread seed from the constants above):
curandState rng;
curand_init(seed_a + idx * seed_b, seed_c, 0, &rng);
// Provides random value from exponential(lambda) via inverse transform
x_exp = -log(curand_uniform(&rng))/lambda;
// Importance weight: target density over proposal density
testval = normal_pdf(x_exp) / exp_pdf(x_exp, lambda);
if(x_exp > quantile){
vals[idx] = testval;
}else{
vals[idx] = 0;
}//ifelse
}// if(idx < n)
return;
} // mc_integration_normal_vegas_kernel
// One thread per quantile: performs N hit-or-miss draws over
// [quantiles[idx], sigma_out] x [0, 1] and writes the area estimate
// (acceptance fraction scaled by the interval width) into vals[idx].
// vals_length is unused in the body.
__global__ void
mc_integration_normal_kernel_2(float *vals, int vals_length, int N,
float *quantiles, int quantile_num, float sigma_out)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Declaration of other needed variables
float x_uni = 0, y_uni = 0, quantile;
int counter = 0, i = 0;
// Only evaluate if thread is within the range of values we need
if(idx < quantile_num){
// Setup the RNG (deterministic per-thread seed from the constants above):
curandState rng;
curand_init(seed_a + idx * seed_b, seed_c, 0, &rng);
quantile = quantiles[idx];
for(i = 0; i < N; i++){
// Provides x uniform on quantile to sigma_out
x_uni = (sigma_out - quantile) * curand_uniform(&rng) + quantile;
// Provides y uniform on 0 to 1
y_uni = curand_uniform(&rng);
if(y_uni < normal_pdf(x_uni)){
counter++;
}// if
}// for
// Acceptance fraction times interval width = tail-area estimate.
vals[idx] = counter * (sigma_out - quantile) / N;
}// if(idx < n)
return;
} // mc_integration_normal_kernel
} // END extern "C"
|
8,223 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#define blockSize 512
#define real float
// Two-elements-per-thread sum reduction (template-unrolled, Harris-style):
// each block loads v[s] + v[s + blockDim.x] into shared memory and reduces
// it to out[blockIdx.x]. Intended launch: gridDim.x = N / (2*blockDim.x),
// bSize == blockDim.x == blockSize, both powers of two.
// NOTE(review): the load guard `s < N/2` does not match the addresses
// actually read (s and s + blockDim.x); for the launches in main() the
// ranges happen to line up, but the robust guard is `s + blockDim.x < N`.
// Also, the final warp stage calls __syncthreads() inside the divergent
// `if (sID < 32)` branch and reads pom without `volatile` — undefined
// behaviour under independent thread scheduling (Volta+); verify on the
// target architecture.
template <int bSize> __global__ void redukcja (int N, real* v, real* out)
{
size_t s = threadIdx.x + blockIdx.x * blockDim.x*2; // first of this thread's two loads
int sID = threadIdx.x;
size_t i;
__shared__ real pom[blockSize];
pom[sID] = 0; // default for threads with no in-range data
if (s<N/2)
pom[sID] = v[s] + v[s + blockDim.x];
__syncthreads();
// Compile-time-unrolled tree reduction down to one warp.
if (bSize >=512) {
if (sID<256) pom[sID] += pom[sID + 256]; __syncthreads();
}
if (bSize >=256) {
if (sID<128) pom[sID] += pom[sID + 128]; __syncthreads();
}
if (bSize >= 128){
if (sID<64) pom[sID] += pom[sID + 64]; __syncthreads();
}
// Last 32 partial sums folded within a single warp.
if (sID < 32){
if (bSize >= 64) pom[sID] += pom[sID + 32];
__syncthreads();
if (bSize >= 32) pom[sID] += pom[sID + 16];
__syncthreads();
if (bSize >= 16) pom[sID] += pom[sID + 8];
__syncthreads();
if (bSize >= 8) pom[sID] += pom[sID + 4];
__syncthreads();
if (bSize >= 4) pom[sID] += pom[sID + 2];
__syncthreads();
if (bSize >= 2) pom[sID] += pom[sID + 1];
__syncthreads();
}
if (sID==0) out[blockIdx.x] = pom[0];
}
// Straightforward block-wise sum reduction: block b writes the sum of its
// slice of v into out[b]. Assumes blockDim.x is a power of two
// (<= blockSize, the shared-array capacity).
__global__ void redukcja2 (int N, real* v, real* out)
{
    size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
    int lid = threadIdx.x;
    __shared__ real partial[blockSize];
    // Out-of-range threads contribute zero.
    partial[lid] = (gid < N) ? v[gid] : 0;
    __syncthreads();
    // Halve the active range each pass until one value remains.
    for (size_t step = blockDim.x / 2; step > 0; step >>= 1) {
        if (lid < step)
            partial[lid] += partial[lid + step];
        __syncthreads();
    }
    if (lid == 0)
        out[blockIdx.x] = partial[0];
}
// Fills v with a test signal: v[s] = sin(2*pi*s / 10) for s in [0, N).
__global__ void wypelnij (int N, real* v)
{
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    v[idx] = sin(idx * 2. * M_PI / 10.);
}
// Reduces N = blockSize^3 values in three chained stages, timed over M
// repetitions with CUDA events:
//   redukcja  (2 loads/thread): N            -> blockSize^2 partials
//   redukcja  (2 loads/thread): blockSize^2  -> blockSize partials
//   redukcja2                 : blockSize    -> 1 value, stored in v[0]
// FIX: device buffers and timing events were never released.
int main ()
{
    size_t N = (size_t)blockSize * blockSize * blockSize;
    int blocks = (N + blockSize-1) / blockSize;
    float dt_ms;
    cudaEvent_t event1, event2;
    cudaEventCreate(&event1);
    cudaEventCreate(&event2);
    real* v;
    cudaMalloc( (void**) &v, N * sizeof(real) );
    real* outV;
    cudaMalloc( (void**) &outV, blockSize * blockSize * sizeof(real) );
    real* outVV;
    cudaMalloc( (void**) &outVV, blockSize * sizeof(real) );
    real out;
    int i;
    int M = 10;   // timing repetitions
    wypelnij <<<blocks, blockSize>>> (N, v);
    cudaEventRecord(event1, 0);
    for (i=0; i<M; i++){
        redukcja<blockSize><<<blocks/2, blockSize>>> (N, v, outV);
        redukcja<blockSize><<<blockSize/2, blockSize>>> (blockSize*blockSize, outV, outVV);
        redukcja2<<<1, blockSize>>> (blockSize, outVV, v);
    }
    cudaEventRecord(event2, 0);
    cudaEventSynchronize(event1);
    cudaEventSynchronize(event2);
    cudaEventElapsedTime(&dt_ms, event1, event2);
    // Final result was written back into v[0] by redukcja2.
    cudaMemcpy (&out, v, 1 * sizeof(real), cudaMemcpyDeviceToHost);
    printf ("Czas redukcji: %f ms wynik; %f\n", dt_ms * 1./M, out);
    // Release device buffers and events (leaked in the original).
    cudaFree(v);
    cudaFree(outV);
    cudaFree(outVV);
    cudaEventDestroy(event1);
    cudaEventDestroy(event2);
    return 0;
}
|
8,224 |
/*
* This is the jacobi relaxation method in gpu
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define SIZE 2048
#define BLOCK_SIZE 32
#define NITER 10000
/* Returns the maximum entry of the SIZE x SIZE grid u, printing how it
 * compares to the previous maximum `ant` at iteration `iter`. */
float ratio(float*u, float ant, int iter){
    float best = 0.0;
    int col, row;
    for (col = 0; col < SIZE; col++)
    {
        for (row = 0; row < SIZE; row++)
        {
            float val = u[row*SIZE + col];
            if (val > best)
                best = val;
        }
    }
    printf(" iter=%d ratio=%f ant=%f max=%f\n", iter, best/ant, ant, best);
    return best;
}
// One Jacobi relaxation sweep on the interior of a SIZE x SIZE grid:
// d_u_new = 0.25 * (h2 * d_f + sum of the four d_u neighbours).
// Boundary entries are left untouched (fixed by host-side initialization).
// One thread per grid point, 2-D launch.
__global__ void jacobi(float *d_u_new, float *d_u, float *d_f, float h2){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Interior points only; the +-1 neighbours vary j and the +-SIZE
// neighbours vary i, all with the same 0.25 weight.
if(i>0 && j>0 && i<SIZE-1 && j<SIZE-1){
int posicion = i*SIZE+j;
d_u_new[i*SIZE+j]=0.25*(
h2*d_f[posicion]+
d_u[posicion-1]+
d_u[posicion+1]+
d_u[posicion-SIZE]+
d_u[posicion+SIZE]);
}
}
// Jacobi relaxation driver: random interior guess, zero Dirichlet
// boundary, NITER ping-ponged sweeps on the GPU, reporting the
// convergence ratio every 1000 iterations.
// FIX: the memory-size report printed a size_t expression with %d
// (undefined behaviour on LP64); it now uses %zu.
int main(){
    float * h_u, * h_f;
    float * d_u, * d_u_new, * d_f;
    float * tmp;
    float ant = 1.0;        // previous maximum, for the ratio report
    int i,j;
    size_t size;
    float h = 1.0/SIZE;     // grid spacing
    /* Host memory malloc */
    size = SIZE*SIZE*sizeof(float);
    printf("We need %zumb of memory\n",3*size/1024/1024);
    h_u = (float*)malloc(size);
    h_f = (float*)malloc(size);
    /* memory for the gpu */
    cudaMalloc(&d_u, size);
    cudaMalloc(&d_u_new, size);
    cudaMalloc(&d_f, size);
    /* Initialization: zero source term, random interior guess */
    for(i=0;i<SIZE; i++){
        for(j=0; j<SIZE; j++){
            h_f[i*SIZE+j]=0.0;
            h_u[i*SIZE+j]=rand();
        }
    }
    /* Bounds: zero out all four edges (Dirichlet) */
    for(i=0;i<SIZE;i++){
        h_u[i]=0.0;
        h_u[i*SIZE]=0.0;
        h_u[i*SIZE+SIZE-1]=0.0;
        h_u[SIZE*(SIZE-1)+i]=0.0;
    }
    /* Copy from host to device */
    cudaMemcpy(d_f,h_f,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_u,h_u,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_u_new,h_u,size,cudaMemcpyHostToDevice);
    /* Grid dimension (SIZE is a multiple of BLOCK_SIZE) */
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
    float h2=h*h;
    /* NITER Jacobi sweeps, ping-ponging the two device buffers; report the
       convergence ratio every 1000 iterations */
    for(i=0;i<NITER;i++){
        jacobi<<<dimGrid,dimBlock>>>(d_u_new,d_u,d_f,h2);
        if(i%1000==0){
            cudaMemcpy(h_u, d_u_new, size, cudaMemcpyDeviceToHost);
            ant=ratio(h_u,ant,i);
        }
        tmp=d_u_new;
        d_u_new=d_u;
        d_u=tmp;
    }
    /* free memory */
    free(h_u);
    free(h_f);
    cudaFree(d_u_new);
    cudaFree(d_u);
    cudaFree(d_f);
    return 0;
}
|
8,225 | #include <stdio.h>
#define THREADS 10
#define N 100
#define A(i,j) A[i*N+j]
#define B(i,j) B[i*N+j]
#define C(i,j) C[i*N+j]
//CUDA kernel
// Element-wise addition helper for matAdd.
// FIX: the sum was stored in an `int` temporary, truncating the fractional
// part of every result; it is now computed and returned in float.
__device__ float add(float a, float b)
{
    return a + b;
}
// C = A + B element-wise over the N x N matrices (macros A(i,j)/B(i,j)/
// C(i,j) index the flat arrays).
// FIX: added a bounds guard so launches whose grid overshoots N x N do not
// write out of range.
__global__ void matAdd(float *A, float *B, float *C)
{
    int i = blockDim.x*blockIdx.x+threadIdx.x;
    int j = blockDim.y*blockIdx.y+threadIdx.y;
    if (i < N && j < N)
        C(i,j) = add(A(i,j), B(i,j));
}
// Adds two N x N matrices on the GPU and verifies against a CPU reference.
// FIX: the launch configuration was one-dimensional (blockDim.y == 1,
// gridDim.y == 1), so matAdd only ever computed column j == 0 and the
// check loop reported mismatches for every other column. The block and
// grid are now 2-D and cover the whole matrix.
int main()
{
    float A[N][N], B[N][N], C[N][N], C2[N][N], *A_d, *B_d, *C_d;
    int i,j;
    dim3 dimBlock(THREADS, THREADS);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);
    // Build inputs and the CPU reference result.
    for (i=0; i<N; i++) {
        for(j=0;j<N;j++) {
            A[i][j] = i*2;
            B[i][j] = N-i;
            C2[i][j] = A[i][j] + B[i][j]; // C2 is used to check the results
        }
    }
    //Allocate array on device
    cudaMalloc((void**) &A_d, sizeof(float)*N*N);
    cudaMalloc((void**) &B_d, sizeof(float)*N*N);
    cudaMalloc((void**) &C_d, sizeof(float)*N*N);
    //Copy array from host memory to device memory
    cudaMemcpy(A_d, A, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    matAdd<<<dimGrid, dimBlock>>>(A_d, B_d, C_d);
    //Copy the result back (blocking copy also synchronizes with the kernel)
    cudaMemcpy(C, C_d, sizeof(float)*N*N, cudaMemcpyDeviceToHost);
    //Check the results: print any element that differs from the reference
    for (i=0;i<N;i++)
        for(j=0;j<N;j++)
            if (C[i][j] != C2[i][j]) printf("%d %f %f\n", i, C[i][j], C2[i][j]);
    //Free memory on the device
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
|
8,226 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define MAX_DIST 5000
typedef struct graphVertices {
int startIndex;
int numberOfNeighbours;
} graphVertices;
// One relaxation sweep over all vertices (Bellman-Ford-style BFS):
// every vertex tries to improve each neighbour's distance by
// (own distance + 1) and clears *gpu_done whenever it succeeds, so the
// host keeps relaunching until a sweep makes no update.
// NOTE(review): result_gpu[neighbour] is updated by concurrent threads
// without atomics; since the host relaunches until a fixed point, a lost
// update is recovered on a later sweep, but the per-sweep intermediate
// values are nondeterministic — verify this is acceptable.
__global__ void bfs_unoptimized(graphVertices* graphVertice_gpu, int* NeighboursVertices_gpu, int numberOfNodes, int* result_gpu, bool* gpu_done) {
// Thread index
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int numOfThreads = blockDim.x * gridDim.x;
// All the threads traverse the adjancy list, striding by the total
// thread count so any launch size covers every vertex
for(int v = 0; v < numberOfNodes; v += numOfThreads) {
// different index for different thread
int vertex = v + tid;
// check boundary condition
if(vertex < numberOfNodes) {
// traverse all the neighbouring vertex concurrently
for(int n=0;n<graphVertice_gpu[vertex].numberOfNeighbours;n++) {
int neighbour = NeighboursVertices_gpu[graphVertice_gpu[vertex].startIndex + n];
// computation of the cost and traversal: relax the neighbour's
// distance and flag that another sweep is needed
if (result_gpu[neighbour] > result_gpu[vertex] + 1) {
result_gpu[neighbour] = result_gpu[vertex] + 1;
*gpu_done = 0;
}
}
}
}
}
// Builds a synthetic graph (every vertex has 2 consecutive neighbours),
// runs the relaxation-sweep BFS kernel to a fixed point and reports the
// kernel-call count and elapsed time.
// FIXES: neighbourVertices was declared with NUM_NODES entries but written
// with NUM_NODES*2 (out-of-bounds); grid_size used truncating division and
// became 0 (an invalid launch) for fewer than 1024 nodes; kernel_call_count
// was incremented while uninitialized; host allocations leaked.
// NOTE(review): neighbour ids k+1 run up to 2*NUM_NODES, beyond the
// result array of NUM_NODES entries — the kernel indexes result_gpu out of
// range for those; confirm the intended graph shape.
int main( int argc, char* argv[] ) {
    // input from user
    int NUM_NODES = atoi(argv[1]);
    // kernel parameters
    int block_size = 1024;
    int grid_size = (NUM_NODES + block_size - 1) / block_size; // ceil-div
    // Graph structure, heap-allocated so large inputs don't blow the stack.
    graphVertices* vertice = new graphVertices[NUM_NODES];
    // Array of neighbouring nodes — 2 per vertex, hence NUM_NODES*2 slots.
    int* neighbourVertices = new int[NUM_NODES*2];
    // populate the graph
    for(int i=0;i<NUM_NODES;i++) {
        vertice[i].numberOfNeighbours = 2;// (rand() % 5)+1;
    }
    vertice[0].startIndex = 0;
    for(int j=1;j<NUM_NODES;j++) {
        vertice[j].startIndex = vertice[j-1].startIndex + vertice[j-1].numberOfNeighbours;
    }
    for(int k=0;k<NUM_NODES*2;k++) {
        neighbourVertices[k] = k+1;
    }
    int start_vertex = neighbourVertices[1];
    cudaSetDevice(0);
    // Time Variables
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    // Variable declaration for GPU
    graphVertices* graphVertice_gpu;
    int* neighbourVertices_gpu;
    int* result_gpu;
    bool* gpu_done;
    // Memory allocation for GPU variables
    cudaMalloc((void**)&graphVertice_gpu, sizeof(graphVertices)*NUM_NODES);
    cudaMalloc((void**)&neighbourVertices_gpu, sizeof(int)*NUM_NODES*2);
    cudaMalloc((void**)&result_gpu, sizeof(int)*NUM_NODES);
    cudaMalloc((void**)&gpu_done, sizeof(bool) * 1);
    int kernel_call_count = 0;   // was uninitialized
    int* result_cpu;
    bool* cpu_done = new bool[1];
    result_cpu = new int[NUM_NODES];
    for(int i=0;i<NUM_NODES;i++) {
        result_cpu[i] = MAX_DIST;   // "infinity"
    }
    result_cpu[start_vertex] = 0;
    // Transfer data from CPU to GPU
    cudaMemcpy(result_gpu, result_cpu, sizeof(int) * NUM_NODES, cudaMemcpyHostToDevice);
    cudaMemcpy(graphVertice_gpu, vertice, sizeof(graphVertices)*NUM_NODES, cudaMemcpyHostToDevice);
    cudaMemcpy(neighbourVertices_gpu, neighbourVertices, sizeof(int)*NUM_NODES*2, cudaMemcpyHostToDevice);
    printf("Running parallel job.\n");
    cudaEventRecord(start,0);
    bool false_value = 1;
    // Relax distances until a full sweep makes no update.
    do
    {
        kernel_call_count++;
        cudaMemcpy(gpu_done, &false_value, sizeof(bool) * 1, cudaMemcpyHostToDevice);
        // call the kernel
        bfs_unoptimized<<<grid_size, block_size>>>(graphVertice_gpu, neighbourVertices_gpu, NUM_NODES, result_gpu, gpu_done);
        cudaMemcpy(cpu_done, gpu_done , sizeof(bool) * 1, cudaMemcpyDeviceToHost);
    } while(*cpu_done != 0);
    // Transfer result back from GPU to CPU
    cudaMemcpy(result_cpu, result_gpu, sizeof(int)*NUM_NODES, cudaMemcpyDeviceToHost);
    printf("Kernel call : %d\n", kernel_call_count);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Parallel Job Time: %.2f ms\n", time);
    cudaFree(graphVertice_gpu);
    cudaFree(neighbourVertices_gpu);
    cudaFree(result_gpu);
    cudaFree(gpu_done);
    // Release host allocations and timing events (leaked before).
    delete[] vertice;
    delete[] neighbourVertices;
    delete[] result_cpu;
    delete[] cpu_done;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
8,227 | /*
* File: MLPNetwork.cu
* Author: ederperez
*/
// GPU activation functions (sigmoid and its derivative) for float and
// double row-major matrices, plus synchronous host-side launch wrappers.
// One thread per matrix element; wrappers use 32x16 thread blocks.
// FIX: the float kernels used double literals (1.0) and exp(), silently
// promoting every element's arithmetic to double precision; they now use
// float literals and expf().
namespace dnn
{
////////////////////////////////////////////////////////////////////////////////
///// GPU kernels and functions /////
////////////////////////////////////////////////////////////////////////////////
// dst[j*cols + i] = 1 / (1 + e^-src[j*cols + i]) — single precision.
__global__ void _gpuSigmoid( int rows, int cols, const float* src, float* dst )
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x; // column
    const int j = blockIdx.y*blockDim.y + threadIdx.y; // row
    int index = j * cols + i;
    if( i < cols && j < rows )
    {
        dst[index] = 1.0f / ( 1.0f + expf(-src[index]) );
    }
}
// dst[j*cols + i] = 1 / (1 + e^-src[j*cols + i]) — double precision.
__global__ void _gpuSigmoid( int rows, int cols, const double* src, double* dst )
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x; // column
    const int j = blockIdx.y*blockDim.y + threadIdx.y; // row
    int index = j * cols + i;
    if( i < cols && j < rows )
    {
        dst[index] = 1.0 / ( 1.0 + exp(-src[index]) );
    }
}
// Sigmoid derivative s*(1-s), s = sigmoid(src) — single precision.
__global__ void _gpuSigmoidDerivative( int rows, int cols, const float* src, float* dst )
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x; // column
    const int j = blockIdx.y*blockDim.y + threadIdx.y; // row
    int index = j * cols + i;
    if( i < cols && j < rows )
    {
        float s = 1.0f / ( 1.0f + expf(-src[index]) );
        dst[index] = s * (1.0f - s);
    }
}
// Sigmoid derivative s*(1-s), s = sigmoid(src) — double precision.
__global__ void _gpuSigmoidDerivative( int rows, int cols, const double* src, double* dst )
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x; // column
    const int j = blockIdx.y*blockDim.y + threadIdx.y; // row
    int index = j * cols + i;
    if( i < cols && j < rows )
    {
        double s = 1.0 / ( 1.0 + exp(-src[index]) );
        dst[index] = s * (1.0 - s);
    }
}
////////////////////////////////////////////////////////////////////////////////
///// Auxiliary functions implementation /////
////////////////////////////////////////////////////////////////////////////////
// Ceiling division: number of blocks of size b needed to cover a elements.
int iDivUp( int a, int b )
{
    return (a % b != 0) ? (a / b + 1) : (a / b);
}
// Launches the float sigmoid kernel over a rows x cols matrix and blocks
// until it completes.
void gpuSigmoid( int rows, int cols, const float* src, float* dst )
{
    dim3 threadsPerBlock(32, 16);
    dim3 blockSize( iDivUp( cols, threadsPerBlock.x ), iDivUp( rows, threadsPerBlock.y ) );
    _gpuSigmoid<<<blockSize, threadsPerBlock>>>( rows, cols, src, dst );
    cudaDeviceSynchronize();
}
// Double-precision variant of gpuSigmoid.
void gpuSigmoid( int rows, int cols, const double* src, double* dst )
{
    dim3 threadsPerBlock(32, 16);
    dim3 blockSize( iDivUp( cols, threadsPerBlock.x ), iDivUp( rows, threadsPerBlock.y ) );
    _gpuSigmoid<<<blockSize, threadsPerBlock>>>( rows, cols, src, dst );
    cudaDeviceSynchronize();
}
// Launches the float sigmoid-derivative kernel and blocks until done.
void gpuSigmoidDerivative( int rows, int cols, const float* src, float* dst )
{
    dim3 threadsPerBlock(32, 16);
    dim3 blockSize( iDivUp( cols, threadsPerBlock.x ), iDivUp( rows, threadsPerBlock.y ) );
    _gpuSigmoidDerivative<<<blockSize, threadsPerBlock>>>( rows, cols, src, dst );
    cudaDeviceSynchronize();
}
// Double-precision variant of gpuSigmoidDerivative.
void gpuSigmoidDerivative( int rows, int cols, const double* src, double* dst )
{
    dim3 threadsPerBlock(32, 16);
    dim3 blockSize( iDivUp( cols, threadsPerBlock.x ), iDivUp( rows, threadsPerBlock.y ) );
    _gpuSigmoidDerivative<<<blockSize, threadsPerBlock>>>( rows, cols, src, dst );
    cudaDeviceSynchronize();
}
}
|
8,228 | #include <stdio.h>
__device__ const char *STR = "Hello World!";
const char STR_LENGTH = 12;
// Each thread prints one character of the device string STR, one per line.
__global__ void hello() {
    const int pos = threadIdx.x % STR_LENGTH;
    printf("%c\n", STR[pos]);
}
// Launches a single block of STR_LENGTH threads; each prints one character.
int main(void) {
    hello<<<1, STR_LENGTH>>>();
    cudaDeviceSynchronize();
    return 0;
}
8,229 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <float.h>
// Gaussian-elimination row update: out[i] -= a[i] * c for every index in
// [m, n). GENERALIZED: the original used only threadIdx.x, so it was correct
// solely for single-block launches; computing the global index keeps the
// existing <<<1, n>>> launches identical (blockIdx.x == 0) while also
// supporting multi-block grids.
__global__ void vector_sub(double *out, const double *a, double c, int m, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= m && tid < n) {
        out[tid] -= a[tid] * c;
    }
}
// Prints the *SIZE x *SIZE matrix row by row, then an empty line.
void printMatrix(double **matrix, const int *SIZE) {
    const int n = *SIZE;
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col)
            printf("%f ", matrix[row][col]);
        printf("\n");
    }
    printf("\n");
}
// Product of the main-diagonal entries of a square *SIZE x *SIZE matrix
// (the determinant of an upper-triangular matrix).
double diagonalMultiplication(double **matrix, const int *SIZE) {
    double product = 1;
    const int n = *SIZE;
    for (int d = 0; d < n; ++d)
        product *= matrix[d][d];
    return product;
}
// Counts the run of consecutive zeros at the start of `range`; counting
// stops permanently at the first non-zero element.
int zeroesCheck(const double *range, const int *SIZE) {
    int leading = 0;
    for (int i = 0; i < *SIZE; ++i) {
        if (range[i] != 0)
            break;
        ++leading;
    }
    return leading;
}
// Integer exponentiation a^b for b >= 0 (returns 1 when b <= 0).
int power(int a, int b) {
    int result = 1;
    while (b-- > 0)
        result *= a;
    return result;
}
// Bubble-sorts the matrix rows by their leading-zero count (ascending) so
// Gaussian elimination encounters non-zero pivots first. Every row swap
// flips the determinant's sign; returns (-1)^(number of swaps).
int sort(double **matrix, int *SIZE) {
    int swaps = 0;
    for (int pass = 0; pass < *SIZE - 1; pass++) {
        for (int row = 0; row < *SIZE - pass - 1; row++) {
            if (zeroesCheck(matrix[row], SIZE) > zeroesCheck(matrix[row + 1], SIZE)) {
                double *tmp = matrix[row];
                matrix[row] = matrix[row + 1];
                matrix[row + 1] = tmp;
                swaps++;
            }
        }
    }
    return power(-1, swaps);
}
// Reduces `matrix` to upper-triangular form by Gaussian elimination (row
// updates are offloaded to the vector_sub kernel) and returns the product
// of the diagonal. Returns 0 when a zero pivot is met (no pivoting done).
// Note: the <<<1, *SIZE>>> launch limits *SIZE to the max threads per block.
double gaussianDeterminant(double **matrix, int* SIZE) {
    int size = *SIZE;
    double first, factor;
    double *d_a, *d_out;
    cudaMalloc((void **) &d_a, sizeof(double) * *SIZE);
    cudaMalloc((void **) &d_out, sizeof(double) * *SIZE);
    while (size > 1) {
        if (matrix[*SIZE - size][*SIZE - size] == 0) {
            // BUG FIX: the original returned here without releasing d_a and
            // d_out, leaking device memory on every singular matrix.
            cudaFree(d_a);
            cudaFree(d_out);
            return 0;
        }
        first = matrix[*SIZE - size][*SIZE - size];  // current pivot
        for (int i = *SIZE - size + 1; i < *SIZE; ++i) {
            factor = matrix[i][*SIZE - size] / first;
            cudaMemcpy(d_out, matrix[i], sizeof(double) * *SIZE, cudaMemcpyHostToDevice);
            cudaMemcpy(d_a, matrix[*SIZE - size], sizeof(double) * *SIZE, cudaMemcpyHostToDevice);
            vector_sub <<< 1, *SIZE >>>(d_out, d_a, factor, *SIZE - size, *SIZE);
            cudaMemcpy(matrix[i], d_out, sizeof(double) * *SIZE, cudaMemcpyDeviceToHost);
            cudaDeviceSynchronize();
        }
        size--;
    }
    cudaFree(d_a);
    cudaFree(d_out);
    return diagonalMultiplication(matrix, SIZE);
}
// Driver: reads matrices from read.txt (size, then size*size values each),
// computes every determinant via GPU-assisted Gaussian elimination, and
// writes "<elapsed_clocks> <determinant>" lines to write.txt.
void init() {
    FILE *fin, *fout;
    if ((fin = fopen("read.txt", "r")) == nullptr) {
        printf("Can't open file 'read.txt'\n");
        exit(-1);
    }
    if ((fout = fopen("write.txt", "w")) == nullptr) {
        printf("Can't open file 'write.txt'\n");
        exit(-1);
    }
    int dim, sign;
    while (fscanf(fin, "%d", &dim) == 1) {
        // Build the dim x dim matrix from the next dim*dim values.
        double **mat = (double **) malloc(dim * sizeof(double *));
        for (int r = 0; r < dim; ++r) {
            mat[r] = (double *) malloc(dim * sizeof(double));
            for (int c = 0; c < dim; ++c)
                fscanf(fin, "%lf", &mat[r][c]);
        }
        clock_t t0 = clock();
        sign = sort(mat, &dim);
        double det = gaussianDeterminant(mat, &dim) * (double) sign;
        clock_t t1 = clock();
        fprintf(fout, "%ld %f\n", t1 - t0, det);
        for (int r = 0; r < dim; ++r) free(mat[r]);
        free(mat);
        if (det > DBL_MAX) exit(-2);
    }
    fclose(fin);
    fclose(fout);
}
// Entry point: run the determinant benchmark driver.
int main() {
    init();
    return 0;
}
|
8,230 | #include "basic_conv.cuh"
#include "assert.h"
#include "real.h"
#include <iostream>
#include "const_conv.cuh"
// Convolves a 7-element signal with a width-5 mask using basic_conv, prints
// all outputs, and checks one known value.
void test1(){
    real signal[] = {1, 2, 3, 4, 5, 6, 7};
    real mask[] = {3, 4, 5, 4, 3};
    real result[7];
    basic_conv(signal, mask, result, 5, 7);
    for (int idx = 0; idx < 7; idx++)
        std::cout << "P[" << idx << "]=" << result[idx] << std::endl;
    assert(result[1] == 38);
}
// Same fixture as test1 but exercises the constant-memory variant.
void test2(){
    real signal[] = {1, 2, 3, 4, 5, 6, 7};
    real mask[] = {3, 4, 5, 4, 3};
    real result[7];
    constant_conv(signal, mask, result, 5, 7);
    for (int idx = 0; idx < 7; idx++)
        std::cout << "P[" << idx << "]=" << result[idx] << std::endl;
    assert(result[1] == 38);
}
// Runs both convolution tests; asserts abort on mismatch.
int main(){
    test1();
    test2();
    std::cout << "success!\n" << std::flush;
}
|
8,231 | #include <iostream>
#include <cmath>
#include <chrono>
#include <random>
#include <limits>
#include <cuda.h>
typedef std::chrono::high_resolution_clock Clock;
#define NUM_TEST 10000000
#define NUM_BLOCKS 1
#define K 100
using namespace std;
// Helper function for modular exponentiation.
// Returns a^e (mode n)
// Helper function for modular exponentiation by repeated squaring.
// Returns a^e (mod n).
// NOTE(review): the intermediate products res*a and a*a are computed in
// 64 bits, so they silently wrap when n exceeds ~2^32 — confirm callers
// keep n below that bound or switch to a 128-bit mulmod.
__device__ unsigned long long modexp(unsigned long long a, unsigned long long e, unsigned long long n) {
    unsigned long long res = 1;
    a = a % n; // Compute a mod n first (if a > n)
    while (e > 0)
    {
        // exponent is odd: fold the current power of a into the result
        if (e & 1)
            res = (res * a) % n;
        // exponent is even
        e = e >> 1; // Shift right one (divide by 2)
        a = (a * a) % n; // Set a = a^2 mod n
    }
    return res;
}
// Called each iteration of witness loop.
// Returns false if composite or true if probably prime
// Called each iteration of witness loop: one Miller-Rabin witness per
// thread. Clears *shared_result (a device-global flag, despite the name)
// when this thread's witness proves n composite; leaves it untouched when
// n remains probably prime. d must satisfy n-1 = 2^r * d with d odd.
__global__ void witnessTest(float *d_random_nums, volatile bool *shared_result, unsigned long long d, unsigned long long n) {
    // Early out once another thread has already proven n composite.
    if (!(*shared_result)) return;
    // Pick a random number in [2..n-2]
    unsigned long long a = 2 + d_random_nums[threadIdx.x] * (n-4);
    unsigned long long x = modexp(a, d, n);
    if (x == 1 || x == n-1) {
        // a is not a witness; n still probably prime for this thread
        return;
    }
    // Iterate r times (2^r * d = n - 1)
    // NOTE(review): x * x wraps in 64 bits when n > ~2^32 — confirm range.
    while (d != n-1) {
        x = (x * x) % n;
        d *= 2;
        if (x == 1) {
            *shared_result = false;
            return;
        }
        if (x == n-1) {
            return;
        }
    }
    // Return composite
    *shared_result = false;
}
// See: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
// Returns true if k-probably prime (k is a parameter that determines accuracy)
// Returns false if composite
// See: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
// Returns true if n is k-probably prime (the launch runs K witness threads
// against the pre-generated uniform randoms in random_nums); returns false
// if composite.
// NOTE(review): every call pays a cudaMalloc/cudaFree pair and two H2D
// copies — a significant overhead when called in a tight loop as in
// single_test; consider hoisting the allocations to the caller.
bool millerRabinPrimalityTest(unsigned long long n, unsigned long long k, float *random_nums) {
    if (n == 4) return false;
    if (n <= 3) return true;
    // Factor n-1 = 2^r * d with d odd (loop strips the factors of two).
    unsigned long long d = n - 1;
    while (d % 2 == 0) {
        d /= 2;
    }
    volatile bool *d_result;   // device flag: stays true while probably prime
    float *d_random_nums;
    bool result = true;
    cudaMalloc((void **) &d_result, sizeof(bool));
    cudaMalloc((void **) &d_random_nums, K * sizeof(float));
    cudaMemcpy((void *) d_result, &result, sizeof(bool), cudaMemcpyHostToDevice);
    cudaMemcpy((void *) d_random_nums, random_nums, K * sizeof(float), cudaMemcpyHostToDevice);
    // Witness loop to repeat k times
    // As long as K <= 256, run on 1 block
    witnessTest<<<NUM_BLOCKS, K>>>(d_random_nums, d_result, d, n);
    // Blocking copy doubles as synchronization with the kernel.
    cudaMemcpy(&result, (void *) d_result, sizeof(bool), cudaMemcpyDeviceToHost);
    cudaFree((void *) d_result);
    cudaFree(d_random_nums);
    return result;
}
// void random_test() {
// random_device rd;
// mt19937_64 eng(rd());
// uniform_int_distribution<unsigned long long> distr;
// cout << "Starting Miller-Rabin CUDA test for " << NUM_TEST << " numbers with parameter k = " << K << ". Tests primality with accuracy " << (1 - (1/pow(4, K))) << "." << endl;
// auto begin = Clock::now();
// for (int i = 0; i < NUM_TEST; i++) {
// millerRabinPrimalityTest(distr(eng), K, );
// }
// auto end = Clock::now();
// auto totalDuration = chrono::duration_cast<chrono::nanoseconds>(end - begin).count();
// auto avgDuration = ((double) totalDuration) / NUM_TEST;
// cout << "Total Time: " << totalDuration << " nanoseconds" << endl;
// cout << "Average Time per iteration: " << avgDuration << " nanoseconds" << endl;
// }
// Benchmarks millerRabinPrimalityTest over numTest random 64-bit values,
// reusing one fixed set of K pre-generated uniform floats as witnesses.
void single_test() {
    random_device rd;
    mt19937_64 rng(rd());
    uniform_int_distribution<unsigned long> distr;
    float *random_nums = new float[K];
    for (int w = 0; w < K; ++w)
        random_nums[w] = (float) distr(rng) / (ULONG_MAX);
    int numTest = 10000000;
    cout << "Starting Miller-Rabin CUDA test for " << numTest << " numbers with parameter k = " << K << ". Tests primality with accuracy " << (1 - (1/pow(4, K))) << "." << endl;
    auto t0 = Clock::now();
    for (int t = 0; t < numTest; ++t)
        millerRabinPrimalityTest(distr(rng), K, random_nums);
    auto t1 = Clock::now();
    auto totalDuration = chrono::duration_cast<chrono::nanoseconds>(t1 - t0).count();
    auto avgDuration = ((double) totalDuration) / numTest;
    cout << "Total Time: " << totalDuration << " nanoseconds" << endl;
    cout << "Average Time per iteration: " << avgDuration << " nanoseconds" << endl;
}
// Entry point: run the single benchmark scenario.
int main(int argc, char const *argv[]) {
    single_test();
    return 0;
}
|
8,232 | #include "includes.h"
// 1-D stencil: out[g] = sum of in[g-RADIUS .. g+RADIUS], staged through
// shared memory. Expects blockDim.x == BLOCK_SIZE and valid halo elements
// on both sides of each block's span in `in` (interior blocks only —
// NOTE(review): the first/last blocks still read out of bounds, as in the
// original; callers must pad the input. Confirm.)
__global__ void stencil_ld(unsigned *in, unsigned *out){
    __shared__ int temp[BLOCK_SIZE + 2*RADIUS];
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: offset the local index by RADIUS so the left halo lands in
    // temp[0..RADIUS-1]; the original wrote temp[negative] for the halo.
    int lindex = threadIdx.x + RADIUS;
    temp[lindex] = in[gindex];
    if(threadIdx.x < RADIUS){
        temp[lindex - RADIUS] = in[gindex - RADIUS];          // left halo
        // BUG FIX: right halo must come from in[gindex + BLOCK_SIZE];
        // the original loaded in[gindex - BLOCK_SIZE].
        temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];  // right halo
    }
    __syncthreads();
    int result = 0;
    // BUG FIX: include offset == RADIUS so the window is symmetric
    // (2*RADIUS + 1 taps); the original dropped the rightmost tap.
    for(int offset = -RADIUS; offset <= RADIUS; offset++){
        result += temp[lindex + offset];
    }
    out[gindex] = result;
}
8,233 |
// BlockDim = 16x16
//GridDim = w/16*h/16
// Converts a packed RGB image to NV12 (planar Y followed by interleaved UV
// at quarter resolution). Each thread handles a 2x2 pixel quad: four Y
// samples plus one averaged U/V pair.
// Launch contract (per the header comments above): blockDim = 16x16,
// gridDim covers (rgb_width/16) x (rgb_height/16), where rgb_width/height
// here are HALF the pixel dimensions (each thread indexes via x<<1, y<<1).
// NOTE(review): u and v are stored into unsigned char before use, so the
// intermediate chroma values are truncated to 8 bits by design — confirm.
extern "C" __global__ void RGB2NV12( unsigned char *in_rgb, unsigned char *nv12,
int rgb_width, int rgb_height, int rgb_pitch, int nv12_pitch )
{
    unsigned char *rgb1,*rgb2,*rgb3,*rgb4, r,g,b,u,v;
    unsigned char *pYDst, *pUVDst;
    int x,y,uv_y;
    x = blockIdx.x*blockDim.x+threadIdx.x;
    y = blockIdx.y*blockDim.y+threadIdx.y;//Y row index (in 2x2 quads)
    uv_y = y + (rgb_height<<1);// UV plane starts after 2*rgb_height Y rows
    if ((x < rgb_width) && (y < rgb_height))
    {
        // The four RGB pixels of this thread's 2x2 quad.
        rgb1 = in_rgb + (y << 1) * rgb_pitch + (x << 1) * 3;
        rgb2 = in_rgb + (y << 1) * rgb_pitch + ((x << 1) + 1) * 3;
        rgb3 = rgb1 + rgb_pitch;
        rgb4 = rgb2 + rgb_pitch;
        pYDst = nv12 + (y << 1)*nv12_pitch;
        // Per-pixel chroma feeds back into the luma formula below.
        u = -0.09991f * rgb1[0] - 0.33609f * rgb1[1] + 0.436f * rgb1[2] + 128;
        v = 0.614f * rgb1[0] - 0.55861f * rgb1[1] - 0.05639f * rgb1[2] + 128;
        pYDst[x << 1] = (rgb1[0]+ rgb1[1] + rgb1[2])/3.492f + 104.339f - 0.446f*u - 0.224f*v;//Y
        u = -0.09991f * rgb2[0] - 0.33609f * rgb2[1] + 0.436f * rgb2[2] + 128;
        v = 0.614f*rgb2[0] - 0.55861f*rgb2[1] - 0.05639f*rgb2[2] + 128;
        pYDst[(x << 1) + 1] = (rgb2[0] + rgb2[1] +rgb2[2])/3.492f + 104.339f - 0.446f*u - 0.224f*v;//Y
        pYDst = nv12 + ((y << 1) + 1)*nv12_pitch;
        u = -0.09991f * rgb3[0]- 0.33609f * rgb3[1] + 0.436f * rgb3[2] + 128;
        v = 0.614f * rgb3[0] - 0.55861f * rgb3[1] - 0.05639f * rgb3[2] + 128;
        pYDst[x << 1] = (rgb3[0]+ rgb3[1] + rgb3[2])/3.492f + 104.339f - 0.446f*u - 0.224f*v;//Y
        u = -0.09991f * rgb4[0] - 0.33609f * rgb4[1] + 0.436f * rgb4[2] + 128;
        v = 0.614f * rgb4[0] - 0.55861f * rgb4[1] - 0.05639f * rgb4[2] + 128;
        pYDst[(x << 1) + 1] = (rgb4[0]+ rgb4[1] + rgb4[2])/3.492f + 104.339f - 0.446f*u - 0.224f*v;//Y
        // Chroma: average the quad's RGB, then one U and one V sample.
        r = (rgb1[0] + rgb2[0] + rgb3[0] + rgb4[0])/4;
        g = (rgb1[1] + rgb2[1] + rgb3[1] + rgb4[1])/4;
        b = (rgb1[2] + rgb2[2] + rgb3[2] + rgb4[2])/4;
        pUVDst = nv12 + uv_y*nv12_pitch;
        pUVDst[x << 1] = -0.09991f*r - 0.33609f*g + 0.436f*b + 128;//U
        pUVDst[(x << 1) + 1] = 0.614f*r - 0.55861f*g - 0.05639f*b + 128;//V
    }
}
|
8,234 | #include <iostream>
#include <map>
#include <string>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
using namespace std;
// Singly-linked character node, 8-byte aligned for efficient device access.
struct __align__(8) node{
    char val;           // payload character; 0 terminates traversal
    struct node *child; // next node (a device pointer when the array is on the GPU)
    //map<int, int> comp_map;
    //int sum;
};
// Walks the linked list starting at `tree`, copying each node's character
// into `out` until a node with val == 0 is reached.
// NOTE(review): output_sum and n are currently unused (the commented code
// suggests a planned checksum); the kernel also assumes the chain is
// 0-terminated within the out buffer's capacity — confirm with the caller.
__global__
void kernel(node * tree, char *out, int *output_sum, int n)
{
    node *p = tree;
    int i=0;
    while(p->val != 0) {
        out[i++] = p->val;
        //output_sum += p->comp_map[i];
        p = p->child;
    }
}
// Builds a linked list of the characters of "tietamattomana" on the host,
// wiring each node's child to the NEXT slot of the *device* array (pointer
// arithmetic on tree_d before the copy), uploads it, walks it in a
// single-thread kernel, and prints the characters copied back.
int main(void)
{
    const int n = 15;
    char data[n] = "tietamattomana";  // 14 chars + NUL; NUL terminates the walk
    node tree[n];
    // Minimal thrust round-trip (device_vector upload + copy back).
    thrust::host_vector<char> h_vec(100);
    //for(int i=0; i<100; i++){
    h_vec[0] = 'R';
    h_vec[1] = 'T';
    //}
    thrust::device_vector<char> d_vec = h_vec;
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    /*
    thrust::host_vector<int> h_vec( 16*1024*1024 );
    thrust::generate(h_vec.begin(), h_vec.end(), rand);
    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;
    thrust::sort(d_vec.begin(), d_vec.end());
    // sort data on the device
    // transfer data back to host
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    */
    node * tree_d;
    char * output_d;
    int * output_sum;
    cudaMalloc((void **)&tree_d, n * sizeof(node));
    cudaMalloc((void **)&output_d, n * sizeof(char));
    cudaMalloc((void **)&output_sum, sizeof(int));
    // p iterates over DEVICE addresses: tree[i].child = tree_d + i + 1, so
    // the links are valid only after tree is copied into tree_d below.
    node * p = tree_d;
    for(int i=0; i<n; i++) {
        tree[i].val = data[i];
        tree[i].child = (++p);
        //tree[i].comp_map[i] = i;
        //tree[i].sum = 0;
    }
    cudaMemcpy(tree_d, tree, n * sizeof(node), cudaMemcpyHostToDevice);
    kernel<<<1,1>>>(tree_d, output_d, output_sum, n);
    char output[n];
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(output, output_d, n * sizeof(char), cudaMemcpyDeviceToHost);
    // NOTE(review): the kernel writes only the 14 non-NUL chars; output[14]
    // is printed uninitialized here — harmless but worth confirming.
    for(int i=0; i<n; i++) {
        std::cout << output[i];
    }
    std::cout << std::endl;
    //int *output_int;
    //cudaMemcpy(output_int, output_sum, sizeof(int), cudaMemcpyDeviceToHost);
    //std::cout << "rrrrrr: " << output_int << std::endl;
    return 0;
}
|
8,235 | //
// CUDA code to compute minimu distance between n points
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define MAX_POINTS 1048576
#define BLOCK_SIZE 1024
// ----------------------------------------------------------------------------
// Kernel Function to compute distance between all pairs of points
// Input:
// X: X[i] = x-coordinate of the ith point
// Y: Y[i] = y-coordinate of the ith point
// n: number of points
//
// Output:
// D: D[0] = minimum distance
//
// Phase 1: thread i computes D[i] = min distance from point i to all points
// j > i. Phase 2: threads of the FIRST grid-slice (i < blockDim.x) reduce D
// to a single minimum written to *Glob.
// NOTE(review): __syncthreads() only synchronizes within one block, so the
// phase-2 reads of D entries written by OTHER blocks are not ordered by any
// barrier — correctness relies on launch timing; confirm before reuse.
__global__ void minimum_distance(float * X, float * Y, float * D, float * Glob,
int n) {
    unsigned int i = ((blockIdx.x * blockDim.x) + threadIdx.x);
    int j = 0;
    if (i < n) {
        // Seed D[i] with the distance to the next point.
        // NOTE(review): for i == n-1 this reads X[n]/Y[n], one past the end
        // of the arrays — latent out-of-bounds read; confirm.
        float xFirst = X[i], yFirst = Y[i];
        float xComp = X[i + 1], yComp = Y[i + 1];
        float sqX = (xComp - xFirst);
        float sqY = (yComp - yFirst);
        D[i] = sqrtf(sqX * sqX + sqY * sqY);
        // Min over all remaining points j > i.
        for (j = i + 1; j < n; j++) {
            xComp = X[j];
            yComp = Y[j];
            float sqX = (xComp - xFirst);
            float sqY = (yComp - yFirst);
            float distance = sqrtf(sqX * sqX + sqY * sqY);
            if (distance < D[i])
                D[i] = distance;
        }
    }
    __syncthreads();
    // Phase 2: only global indices < blockDim.x participate (block 0).
    if (i < blockDim.x && i < n) {
        float sdata;
        int tid = threadIdx.x;
        int shift = n;
        if (blockDim.x < n) {
            // Serial pre-reduction: each thread folds its strided slice of D.
            shift = n / blockDim.x;
            sdata = D[tid * shift];
            for (j = 1; j < shift; j++) {
                if (sdata > D[(tid * shift) + j])
                    sdata = D[(tid * shift) + j];
            }
            shift = blockDim.x;
            D[tid] = sdata;
        }
        __syncthreads();
        // Tree reduction over the first `shift` entries of D.
        for (unsigned int s = shift / 2; s > 0; s >>= 1) {
            if (tid < s) {
                if (D[tid] > D[tid + s]) {
                    D[tid] = D[tid + s];
                }
            }
            __syncthreads();
        }
        if (i == 0)
            *Glob = D[0];
    }
}
// ----------------------------------------------------------------------------
// Main program - initializes points and computes minimum distance
// between the points
//
// ----------------------------------------------------------------------------
// Main program - initializes random points, computes the minimum pairwise
// distance on the GPU, and validates against a brute-force CPU pass.
//
int main(int argc, char* argv[]) {
    // Host Data
    float * hVx;       // host x-coordinate array
    float * hVy;       // host y-coordinate array
    float * hmin_dist; // minimum value on host
    // Device Data
    float * dVx;       // device x-coordinate array
    float * dVy;       // device y-coordinate array
    float * dmin_dist; // minimum value on device
    float * dVd;       // device per-point minimum scratch array
    int i, j, size, num_points, threads, blocks;
    float dx, dy, Dij, distance;
    unsigned int seed = 0;           // fixed seed -> reproducible points
    cudaEvent_t start, stop;         // GPU timing variables
    struct timeval cpu_start, cpu_stop; // CPU timing variables
    float time_array[10];
    // Timing initializations
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Check input
    if (argc != 2) {
        printf("Use: %s <number of points>\n", argv[0]);
        exit(0);
    }
    if ((num_points = atoi(argv[argc - 1])) > MAX_POINTS) {
        printf("Maximum number of points allowed: %d\n", MAX_POINTS);
        exit(0);
    }
    // Allocate host coordinate arrays
    size = num_points * sizeof(float);
    hVx = (float *) malloc(size);
    hVy = (float *) malloc(size);
    hmin_dist = (float *) malloc(sizeof(float));
    // Initialize points uniformly in the unit square
    for (i = 0; i < num_points; i++) {
        hVx[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
        hVy[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
    }
    // Allocate device coordinate arrays
    cudaMalloc(&dVx, size);
    cudaMalloc(&dVy, size);
    cudaMalloc(&dmin_dist, sizeof(float));
    cudaMalloc(&dVd, size);
    // Copy coordinate arrays from host memory to device memory
    cudaEventRecord(start, 0);
    cudaMemcpy(dVx, hVx, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dVy, hVy, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[0]), start, stop);
    // Invoke kernel
    cudaEventRecord(start, 0);
    threads = 256;
    // BUG FIX: the original used num_points / threads, which truncates and
    // silently skips the tail points whenever num_points is not an exact
    // multiple of the block size; use ceiling division instead.
    blocks = (num_points + threads - 1) / threads;
    if (blocks < 1)
        blocks = 1;
    minimum_distance<<<blocks,threads>>>(dVx, dVy, dVd, dmin_dist, num_points);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[1]), start, stop);
    // Copy result from device memory to host memory
    cudaEventRecord(start, 0);
    cudaMemcpy(hmin_dist, dmin_dist, sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[2]), start, stop);
    printf("Number of Points = %d\n", num_points);
    printf("GPU Host-to-device = %f ms \n", time_array[0]);
    printf("GPU execution time = %f ms \n", time_array[1]);
    printf("GPU Device-to-host = %f ms \n", time_array[2]);
    printf("Minimum distance (GPU) = %e\n", hmin_dist[0]);
    // Compute minimum distance on host to check device computation
    gettimeofday(&cpu_start, NULL);
    dx = hVx[1] - hVx[0];
    dy = hVy[1] - hVy[0];
    distance = sqrtf(dx * dx + dy * dy);
    for (i = 0; i < num_points; i++) {
        for (j = i + 1; j < num_points; j++) {
            dx = hVx[j] - hVx[i];
            dy = hVy[j] - hVy[i];
            Dij = sqrtf(dx * dx + dy * dy);
            if (distance > Dij)
                distance = Dij;
        }
    }
    gettimeofday(&cpu_stop, NULL);
    time_array[3] = 1000 * (cpu_stop.tv_sec - cpu_start.tv_sec)
        + 0.000001 * (cpu_stop.tv_usec - cpu_start.tv_usec);
    printf("CPU execution time = %f ms\n", time_array[3]);
    printf("Minimum distance (CPU) = %e\n", distance);
    // Free device memory
    cudaFree(dVx);
    cudaFree(dVy);
    cudaFree(dmin_dist);
    cudaFree(dVd);
    // Free host memory
    free(hVx);
    free(hVy);
    free(hmin_dist);
    return 0;
}
8,236 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
#define WIDTH 10
#define TILE_WIDTH 4
// Tiled WIDTH x WIDTH matrix multiply: matC = matA * matB, staging
// TILE_WIDTH x TILE_WIDTH tiles through shared memory. Launch with
// blockDim = (TILE_WIDTH, TILE_WIDTH).
// BUG FIXES vs. original: (1) the inner product ran k < WIDTH over a tile
// holding only TILE_WIDTH columns; (2) the operand indices were transposed
// (sA[x][k]*sB[k][y]); (3) __syncthreads() sat inside a divergent
// if(row<WIDTH && col<WIDTH) branch, deadlocking edge blocks; (4) the phase
// count WIDTH/TILE_WIDTH truncated, skipping the partial last tile.
__global__ void matmulShared(float* matA, float* matB, float* matC){
    __shared__ float sA[TILE_WIDTH][TILE_WIDTH];
    __shared__ float sB[TILE_WIDTH][TILE_WIDTH];
    // Global output coordinates for this thread.
    unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x;
    unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y;
    float cVal = 0.0f;
    const int numPhases = (WIDTH + TILE_WIDTH - 1) / TILE_WIDTH; // ceil-div
    for (int m = 0; m < numPhases; m++)
    {
        int aCol = m*TILE_WIDTH + threadIdx.x;
        int bRow = m*TILE_WIDTH + threadIdx.y;
        // Zero-pad out-of-range loads so edge tiles contribute nothing.
        sA[threadIdx.y][threadIdx.x] =
            (row < WIDTH && aCol < WIDTH) ? matA[row*WIDTH + aCol] : 0.0f;
        sB[threadIdx.y][threadIdx.x] =
            (bRow < WIDTH && col < WIDTH) ? matB[bRow*WIDTH + col] : 0.0f;
        __syncthreads();   // all threads reach this, divergence-free
        for (int k = 0; k < TILE_WIDTH; k++)
            cVal += sA[threadIdx.y][k] * sB[k][threadIdx.x];
        __syncthreads();   // protect the tiles before the next phase reloads
    }
    if (row < WIDTH && col < WIDTH)
        matC[row*WIDTH + col] = cVal;
}
// Multiplies the WIDTH x WIDTH host matrices mA * mB into mC using the
// tiled GPU kernel: allocates device buffers, copies operands up, launches,
// copies the product back and releases the buffers.
void matriksMulShared(float* mA, float* mB, float* mC){
    // Device pointers
    float* a_d, *b_d, *c_d;
    // Matrix size in bytes
    int size = WIDTH * WIDTH * sizeof(float);
    // BUG FIX: use cudaError_t (not int), report the failing call by its
    // real name (the original said "cudaMemcpy" for a cudaMalloc failure),
    // and only print on actual failure instead of unconditionally.
    cudaError_t err = cudaMalloc((void**)&a_d, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error invoking cudaMalloc (ERRCODE %d)\n", err);
    }
    cudaMemcpy(a_d, mA, size, cudaMemcpyHostToDevice);
    // Allocate and copy matrix b
    cudaMalloc((void**)&b_d, size);
    cudaMemcpy(b_d, mB, size, cudaMemcpyHostToDevice);
    // Allocate device memory for the result c
    cudaMalloc((void**)&c_d, size);
    // Ceil-div so a partial last tile still gets a block.
    int gridSize = (WIDTH/TILE_WIDTH) + (WIDTH%TILE_WIDTH>0?1:0);
    dim3 dimGrid(gridSize, gridSize);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    matmulShared<<<dimGrid,dimBlock>>>(a_d,b_d,c_d);
    // Blocking copy synchronizes with the kernel.
    cudaMemcpy(mC, c_d, size, cudaMemcpyDeviceToHost);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}
// Fills two WIDTH x WIDTH matrices with deterministic values, multiplies
// them on the GPU, and prints inputs and the resulting product.
int main(void){
    size_t size = WIDTH * WIDTH * sizeof(float);
    // allocate arrays on host
    float *M = (float *) malloc(size);
    float *N = (float *) malloc(size);
    float *P = (float *) malloc(size);
    for (int idx = 0; idx < (WIDTH*WIDTH); ++idx) {
        M[idx] = idx;
        N[idx] = (WIDTH*WIDTH -1) - idx;
        P[idx] = 0.f;
        printf("%.3f %.3f %.3f\n", M[idx], N[idx], P[idx]);
    }
    matriksMulShared(M, N, P);
    for (int idx = 0; idx < (WIDTH*WIDTH); ++idx) {
        if (idx % WIDTH == 0)
            printf("\n");
        printf("%.3f ", P[idx]);
    }
    free(M);
    free(N);
    free(P);
    return 0;
}
|
8,237 | #include <stdio.h>
#define N 64
//#define N 128
//#define N 1024
//#define N 63
//#define N 65
//#define N 4096
#define TPB 32
//#define TPB 1
// Maps index i in [0, n-1] onto the unit interval [0, 1].
float scale(int i, int n){
    const float numer = (float) i;
    return numer / (n - 1);
}
// 1-D Euclidean distance |x2 - x1|.
// BUG FIX: use single-precision sqrtf; the original called the double
// overload sqrt, forcing a float->double->float round trip in device code.
__device__
float distance(float x1, float x2){
    return sqrtf((x2-x1)*(x2-x1));
}
// Computes d_out[i] = |d_in[i] - ref| for i in [0, N); one thread per
// element.
__global__
void distanceKernel(float *d_out, float *d_in, float ref){
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: re-enable the bounds guard (it was commented out). Without
    // it, tail threads of the last block read/write past the N-element
    // buffers whenever N is not a multiple of TPB (e.g. the commented
    // N = 63 / 65 configurations).
    if (i > N-1)
        return;
    float x = d_in[i];
    d_out[i] = distance(x, ref);
    if(0)  // debug trace, compiled out
    {
        // BUG FIX: the format string had six specifiers for seven
        // arguments; print the computed distance as the seventh value.
        printf("blockIdx:%2d,blockDim:%2d,threadIdx:%2d, i = %2d: dist from %f to %f is %f.\n",
            blockIdx.x,blockDim.x,threadIdx.x,i, ref, x, d_out[i]);
    }
    //if(i==4095) printf("find 4095\n");
}
// Fills a managed array with N evenly spaced samples of [0,1], then
// computes each sample's distance from 0.5 on the GPU.
int main(){
    const float ref = 0.5f;
    float *in = 0;
    float *out = 0;
    cudaMallocManaged(&in, N*sizeof(float));
    cudaMallocManaged(&out, N*sizeof(float));
    for (int idx = 0; idx < N; ++idx)
        in[idx] = scale(idx, N);
    const int numBlocks = (N+TPB-1)/TPB;   // ceil-div launch
    distanceKernel<<<numBlocks, TPB>>>(out, in, ref);
    cudaDeviceSynchronize();
    cudaFree(in);
    cudaFree(out);
    return 0;
}
|
8,238 |
//This benchmark measures the kernel overhead as linear function a + Xb where X is the number of launched TBs
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS_NUM 1024 // one thread to initialize the pointer-chasing array
#define WARP_SIZE 32
#define ARRAY_SIZE 4096
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its source location; terminates when abort is
// set (the default). Pair with the gpuErrchk macro above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Deliberately empty kernels: each variant is launched with a different
// thread-block count so pure launch overhead can be measured per TB count
// in the profiler. The unused parameters keep every launch signature
// identical across variants.
__global__ void kernel_lat_1TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_2TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_4TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_8TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_16TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_32TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_64TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_128TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_256TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_512TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_1024TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
__global__ void kernel_lat_2048TB(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
}
// Launches the empty benchmark kernels with increasing TB counts so a
// profiler can fit launch overhead as a + X*b (X = number of TBs), then
// copies the (unused) buffers back.
// BUG FIX vs. original: host and device allocations are now released
// before exit; the original leaked all of them.
int main(){
    uint32_t *startClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint64_t *dsink = (uint64_t*) malloc(THREADS_NUM*sizeof(uint64_t));
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    uint64_t *posArray_g;
    uint64_t *dsink_g;
    gpuErrchk( cudaMalloc(&startClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&posArray_g, ARRAY_SIZE*sizeof(uint64_t)) );
    gpuErrchk( cudaMalloc(&dsink_g, THREADS_NUM*sizeof(uint64_t)) );
    kernel_lat_1TB<<<1,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_2TB<<<2,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    /*
    kernel_lat_4TB<<<4,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_8TB<<<8,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_16TB<<<16,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_32TB<<<32,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_64TB<<<64,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_128TB<<<128,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_256TB<<<256,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_512TB<<<512,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    kernel_lat_1024TB<<<1024,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    */
    kernel_lat_2048TB<<<2048,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    // Blocking copies also synchronize with the last launch.
    gpuErrchk( cudaMemcpy(startClk, startClk_g, THREADS_NUM*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, THREADS_NUM*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(dsink, dsink_g, THREADS_NUM*sizeof(uint64_t), cudaMemcpyDeviceToHost) );
    printf("Kernel Launch Latency : Check CUDA profiler events\n");
    gpuErrchk( cudaFree(startClk_g) );
    gpuErrchk( cudaFree(stopClk_g) );
    gpuErrchk( cudaFree(posArray_g) );
    gpuErrchk( cudaFree(dsink_g) );
    free(startClk);
    free(stopClk);
    free(dsink);
    return 0;
}
|
8,239 | /* Copyright (C) 2012 Fabrizio Gueli
*
* This file is part of Cuda-complex-sim
*
* Cuda-complex-sim is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, either
* version 3 of the License, or (at your option) any later version.
*
* Cuda-complex-sim is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Cuda-complex-sim. If not, see <http://www.gnu.org/licenses/>.
*/
/*
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include "host.hpp"
#include "h_barabasi_game.hpp"
#include <utility> // for std::pair
#include <algorithm> // for std::for_each
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
using namespace boost;
template <class Graph>
struct print_edges {
print_edges(Graph& g) : G(g) { }
typedef typename boost::graph_traits<Graph>::edge_descriptor Edge;
typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex;
void operator()(Edge e) const
{
typename boost::property_map<Graph, vertex_index_t>::type
id = get(vertex_index, G);
Vertex src = source(e, G);
Vertex targ = target(e, G);
cout << "(" << id[src] << "," << id[targ] << ") ";
}
Graph& G;
};
template <class Graph>
struct print_index {
print_index(Graph& g) : G(g){ }
typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex;
void operator()(Vertex c) const
{
typename boost::property_map<Graph,vertex_index_t>::type
id = get(vertex_index, G);
cout << id[c] << " ";
}
Graph& G;
};
template <class Graph>
struct stampa {
typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex;
stampa(Graph& _g) : g(_g) { }
void operator()(Vertex v) const
{
typename boost::property_map<Graph, vertex_index_t>::type
id = get(vertex_index, g);
cout << "vertex id: " << id[v] << endl;
cout << "out-edges: ";
for_each(out_edges(v, g).first, out_edges(v,g).second,
print_edges<Graph>(g));
cout << endl;
}
Graph& g;
};
int main(){
// writing out the edges in the graph
typedef std::pair<int, int> Edge;
Graph g = h_barabasi_game(3,1,100);
h_average_links_number = 5;
h_max_nodes_number= 100;
//calcParameters(g);
//Allocate memory for Host Compact List (Supplementary Link array size, max nodes number, average links number)
h_allocateDataStructures(200);
printf("Numero nodi :%d\n",h_max_nodes_number);
printf("Numero average_edges :%d\n",h_average_links_number);
Link p;
p.target = -1;
//Initialize Nodes Array and Links Array
h_initArray<bool>(false,h_nodes_array,100);
h_initArray<Link>(p,h_links_target_array,h_average_links_number*100);
//Convert Boost adjacency list to Compact List
adjlistToCompactList(g);
int j = 0;
for(j=0; j<h_max_nodes_number; j++){
printf("Scorro L'array dei nodi[%d]= %d\n",j,h_nodes_array[j]);
}
for(j=0; j<h_average_links_number*100; j++){
printf("Scorro L'array dei link[%d]= %d\n",j,h_links_target_array[j].target);
}
//Convert CompactList to AdjacencyList
CompactListToAdjList(&g);
boost::property_map<Graph, vertex_index_t>::type
id = get(vertex_index, g);
// cout << "vertices(g) = ";
// boost::graph_traits<Graph>::vertex_iterator vi;
//for (vi = vertices(g).first; vi != vertices(g).second; ++vi)
// std::cout << id[*vi] << " ";
//std::cout << std::endl;
for_each(vertices(g).first, vertices(g).second,
stampa<Graph>(g));
return 1;
}
*/
|
8,240 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/timeb.h>
#include <sys/time.h>
#define N 300000000
#define LIMIT 4000
// Computes res[i] = 2 * (a[i] + b[i]) over all N elements, partitioned
// contiguously across BLOCKS blocks of blockDim.x threads each.
// BUG FIX: when N is not divisible by BLOCKS*blockDim.x, the integer
// division below drops the remainder and the tail elements were never
// written; the last thread now extends its range to cover them.
__global__ void hello(int *a, int *b, int *res, int const BLOCKS){
    printf("[%d-%d] start \n", blockIdx.x, threadIdx.x);
    int numEl = (N / BLOCKS / blockDim.x);          // elements per thread
    int start = (blockDim.x * blockIdx.x * numEl) + (threadIdx.x * numEl);
    int end = start + numEl;
    // The very last thread absorbs the division remainder, if any.
    if (blockIdx.x == BLOCKS - 1 && threadIdx.x == blockDim.x - 1)
        end = N;
    for(int i = start; i < end; i++){
        res[i] = (a[i] + b[i]) * 2;
    }
    printf("[%d-%d] end \n", blockIdx.x, threadIdx.x);
}
// Fills a and b with deterministic values bounded by LIMIT.
// BUG FIX: for N = 300,000,000 the original expression i * i overflows
// signed int (undefined behavior from i = 46341 on); widen the product to
// 64 bits before taking the modulus.
void array_init(int *a, int *b){
    printf("init array . . .\n");
    for(int i = 0; i<N; i++){
        a[i] = i % LIMIT;
        b[i] = (int)(((long long)i * i) % LIMIT);
    }
    printf("end init array");
}
// CPU reference pass: res[i] = 2 * (a[i] + b[i]) for all N elements.
void sequential_product(int *a, int *b, int *res){
    printf("Start sequential\n");
    for (int idx = 0; idx < N; ++idx)
        res[idx] = (a[idx] + b[idx]) * 2;
    printf("End sequential\n");
}
// Compares a sequential CPU pass against the GPU kernel for
// res[i] = 2*(a[i]+b[i]) over N elements, timing both and verifying the
// results match.
// BUG FIX vs. original: host arrays, device arrays, and CUDA events are
// now released before exit; the original leaked ~4.8 GB of allocations.
int main(void){
    int BLOCKS = 5;
    int THREAD_PER_BLOCK = 10;
    int *a, *b, *res, *res_seq; //Host var
    int *dev_a, *dev_b, *dev_res; //Device var
    struct timeb init, fin;
    int diff;
    float millisec = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //Init array
    a = (int*)malloc(N * sizeof(int));
    b = (int*)malloc(N * sizeof(int));
    res = (int*)calloc(N, sizeof(int));
    res_seq = (int*)calloc(N, sizeof(int));
    ftime(&init);
    array_init(a, b);
    ftime(&fin);
    diff = (int) (1000.0 * (fin.time - init.time) + (fin.millitm - init.millitm));
    printf("Stop sequential, time: %d (millisec)\n\n\n\n", diff);
    cudaMalloc((void**)&dev_a, N*sizeof(int));
    cudaMalloc((void**)&dev_b, N*sizeof(int));
    cudaMalloc((void**)&dev_res, N*sizeof(int));
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    //Sequential
    ftime(&init);
    sequential_product(a, b, res);
    ftime(&fin);
    diff = (int) (1000.0 * (fin.time - init.time) + (fin.millitm - init.millitm));
    printf("Stop sequential, time: %d (millisec)\n\n\n\n", diff);
    //Parallel
    printf("Start parallel\n");
    cudaEventRecord(start);
    hello<<<BLOCKS,THREAD_PER_BLOCK>>>(dev_a, dev_b, dev_res, BLOCKS);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&millisec, start, stop);
    printf("Stop parallel, time: %.5f (millisec)\n\n\n\n", millisec);
    cudaMemcpy(res_seq, dev_res, N*sizeof(int), cudaMemcpyDeviceToHost);
    #ifdef DEBUG
    for(int i = 0; i<100; i++){
        printf("res[%d] = %d, res_seq[%d] = %d\n", i, res[i], i, res_seq[i]);
    }
    #endif
    //Correctness checking: element-wise compare of CPU vs GPU results
    int isEqual = 1;
    for(int i = 0; i<N; i++){
        if(res[i] != res_seq[i]){
            printf("res[%d] = %d, res_seq[%d] = %d\n", i, res[i], i, res_seq[i]);
            isEqual = 0;
            break;
        }
    }
    if(isEqual)
        printf("The two sum is equals\n");
    else
        printf("Error in sum\n");
    // Release everything before resetting the device.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_res);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(a);
    free(b);
    free(res);
    free(res_seq);
    cudaDeviceReset();
    return 0;
}
|
8,241 | __global__ void hist(int *L, int size, int *hist, int n)
{
// Histogram kernel: counts occurrences of each value of L[0..size) into
// hist, using the value itself as the bin index. The caller must zero
// `hist` beforehand and guarantee 0 <= L[i] < bin count.
// NOTE(review): parameter `n` is unused here — presumably the bin count;
// confirm against callers.
int idx, offset;
idx = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop: the total thread count is the stride, so any launch
// geometry covers the whole input.
offset = blockDim.x * gridDim.x;
for (int i = idx; i < size; i += offset) {
// atomicAdd serializes concurrent increments of the same bin.
atomicAdd(&hist[L[i]],1);
}
}
|
8,242 | /*
* Rayhana ZIARA
* produit matrice matrice
*/
#include <stdlib.h>
#include <stdio.h>
/*
* DESCRIPTION : kernel concernant le produit matrice matrice
* PARAMETRES : matrice A, nb ligne de A, nb colonne de A, matrice B, nb ligne de B, nb colonne de B, matrice C, nb ligne de C et nb colonne de C
* RETOUR : /
*/
/*
 * DESCRIPTION: kernel computing the matrix product C = A * B, one thread
 *              per element C[ligne][colonne]. All matrices are row-major.
 * PARAMETERS: A (l_A x c_A), B (l_B x c_B), C (l_C x c_C);
 *             requires c_A == l_B, l_C == l_A, c_C == c_B.
 * RETURN: /
 */
__global__ void matMul(float *A, int l_A, int c_A, float *B, int l_B, int c_B, float *C, int l_C, int c_C)
{
    int ligne = blockDim.x * blockIdx.x + threadIdx.x;
    int colonne = blockDim.y * blockIdx.y + threadIdx.y;
    // BUG FIX: the guard must also reject ligne == l_A / colonne == c_B
    // (the old `>` comparison let one out-of-range row and column through,
    // writing past the arrays). Such threads are normal padding from the
    // rounded-up launch grid, so they exit silently instead of printf-ing.
    if (ligne >= l_A || colonne >= c_B)
        return;
    float resultat = 0.0f;
    for (int i = 0; i < c_A; i++)
        resultat += A[ligne * c_A + i] * B[i * c_B + colonne];
    C[ligne * c_C + colonne] = resultat;
}
/*
* DESCRIPTION : fonction d'affichage de matrice et de vecteur
* PARAMETRES : matrice à afficher, nb ligne et nb colonne de A,
* RETOUR : /
*/
/*
 * DESCRIPTION: print a row-major matrix (ligne rows x colonne columns),
 *              tab-separated, one row per line.
 * PARAMETERS: M = matrix to print, ligne = row count, colonne = column count
 * RETURN: /
 */
void affichage(float *M, int ligne, int colonne)
{
    for(int i = 0; i < ligne; i++)
    {
        for(int j = 0; j < colonne; j++)
            // BUG FIX: the row stride of a row-major (ligne x colonne)
            // matrix is `colonne`, not `ligne`; the old `M[i * ligne + j]`
            // printed the wrong elements (and read out of bounds) whenever
            // the matrix was not square.
            fprintf(stdout, "%lf\t", M[i * colonne + j]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");
}
// Driver: builds A (m x n) and B (n x m), computes C = A * B (m x m) on
// the GPU and prints all three matrices. Sizes come from argv.
int main(int argc, char **argv)
{
    // Host matrices, stored row-major in 1D arrays.
    float *A, *B, *C;
    int n, m;
    // Device counterparts.
    float *d_A, *d_B, *d_C;
    if(argc != 3)
    {
        fprintf(stderr, "ERREUR - Veuillez entrez la taille de A et la taille de B en parametre d'execution. Merci'\n./exam_rz n m\n");
        return -1;
    }
    n = atoi(argv[1]);
    m = atoi(argv[2]);
    // Host allocation: A is (m x n), B is (n x m), C is (m x m).
    A = (float*)malloc(m * n * sizeof(float));
    B = (float*)malloc(n * m * sizeof(float));
    C = (float*)malloc(m * m * sizeof(float));
    // BUG FIX: the row stride of row-major A (m rows x n columns) is n,
    // not m; the old `A[i * m + j]` scattered the writes and left elements
    // uninitialized whenever m != n. Same fix for B (n x m, stride m).
    // The kernel reads A with stride c_A == n and B with stride c_B == m,
    // so these layouts now agree with it.
    for(int i = 0; i < m; i++)
    {
        for(int j = 0; j < n; j++)
            A[i * n + j] = i * n + j;
    }
    for(int i = 0; i < n; i++)
    {
        for(int j = 0; j < m; j++)
            B[i * m + j] = i * m + j;
    }
    // Device allocation and upload.
    cudaMalloc(&d_A, m * n * sizeof(float));
    cudaMalloc(&d_B, n * m * sizeof(float));
    cudaMalloc(&d_C, m * m * sizeof(float));
    cudaMemcpy(d_A, A, m * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, n * m * sizeof(float), cudaMemcpyHostToDevice);
    // One thread per element of C; grid rounded up so all of C is covered.
    dim3 threads(32, 16); // 32*16 = 512 threads per block
    dim3 blocks;
    blocks.x = (m + threads.x - 1) / threads.x;
    blocks.y = (m + threads.y - 1) / threads.y;
    matMul<<<blocks, threads>>>(d_A, m, n, d_B, n, m, d_C, m, m);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    // Download the result.
    cudaMemcpy(C, d_C, m * m * sizeof(float), cudaMemcpyDeviceToHost);
    fprintf(stdout, "Matrice A\n");
    affichage(A, m, n);
    fprintf(stdout, "Matrice B\n");
    affichage(B, n, m);
    fprintf(stdout, "Matrice C\n");
    affichage(C, m, m);
    // Release host memory.
    free(A);
    free(B);
    free(C);
    // Release device memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
8,243 | #include "TFcore.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Backward-difference gradient: dp[i] = (p[i] - pshift[i]) * XRES, where
// pshift is p shifted by one bin (host equivalent in computeDiffusion()).
// One thread per element; there is NO bounds guard, so the launch must
// supply exactly as many threads as elements.
__global__ void _computeGradient(double* p, double* pshift, double* dp, int XRES) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
dp[i] = (p[i] - pshift[i]) * (double)XRES;
}
// Explicit diffusion update: p[i] += alpha * ddp[i] (second derivative
// scaled by the diffusion coefficient). One thread per element; the
// launch must supply exactly one thread per entry (no bounds guard).
__global__ void _computeWeightedSum(double* p, double* ddp, double alpha) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    p[idx] += alpha * ddp[idx];
}
//__global__ void normalizeProb(double* pdist) {
// // <<< M, numChannels >>>
// int numChannels = blockDim.x;
// int m = blockIdx.x;
// int n = threadIdx.x;
//
// double psum = 0;
// for (int k = 0; k < XRES; k++) psum += pdist[k + n * XRES + m * numChannels * XRES];
// for (int k = 0; k < XRES; k++) pdist[k + n * XRES + m * numChannels * XRES] /= psum;
//}
// Element-wise Bayes update: posterior = prior * likelihood.
// One thread per element, no bounds guard (launch must match the size).
__global__ void _computeProduct(double* pdist_prior, double* pdist_lik, double* pdist_post) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    pdist_post[idx] = pdist_lik[idx] * pdist_prior[idx];
}
// Default constructor: no output file is opened here; initModel() falls
// back to opening "output.csv" when logging is first needed.
TFcore::TFcore() {
}
// Open the result log at `filename` (string overload).
TFcore::TFcore(string filename) {
outputFile.open(filename, ios::out);
}
// Open the result log at `filename` (filesystem-path overload).
TFcore::TFcore(fs::path filename) {
outputFile.open(filename, ios::out);
}
//TFcore::TFcore(CTFcoreMFCDlg& dlg) : dlg_(dlg) {
// TFcore();
//}
// Release every buffer allocated by initModel() and close both streams.
// NOTE(review): if a TFcore is destroyed without initModel() having run,
// these pointers are uninitialized and free() on them is undefined —
// confirm that callers always call initModel()/loadData() first.
TFcore::~TFcore() {
free(mu_init);
free(sig2_init);
free(sig2_reg);
free(sig2_update);
free(xbin);
free(xnew);
free(idnew);
free(emgMAV);
free(emgStack);
free(pdist_prior);
free(pdist_post);
free(pdist_lik);
inputFile.close();
outputFile.close();
}
// Allocate all model state for `n` channels and initialize the first
// pattern (M = 1) with uniform distributions. Must be called before
// getSample()/proceedIteration(); the destructor frees what is allocated
// here.
void TFcore::initModel(int n) {
M = 1;
reglast = 0;
samples = 0;
bCollect = false;
bCompute = false;
bRegister = false;
bGPU_product = false;
bGPU_diffusion = false;
bFuncRegistration = true;
bFuncAdaptation = true;
// XRES bins per channel distribution, WIN_MAV samples in the MAV window,
// at most M_MAX stored patterns.
XRES = 128;
WIN_MAV = 1024;
M_MAX = 2048;
// set channel
setNumChannels(n);
// memory allocation
mu_init = (double*)calloc(numChannels, sizeof(double));
sig2_init = (double*)calloc(numChannels, sizeof(double));
sig2_reg = (double*)calloc(numChannels, sizeof(double));
sig2_update = (double*)calloc(numChannels, sizeof(double));
emgMAV = (double*)calloc(numChannels, sizeof(double));
xnew = (double*)calloc(numChannels, sizeof(double));
idnew = (int*)calloc(numChannels, sizeof(int));
xbin = (double*)calloc(XRES, sizeof(double));
// Bin centers at (k+1)/XRES, i.e. the upper edge of each of XRES bins.
for (int k = 0; k < XRES; k++) xbin[k] = ((k + 1) / (double)(XRES));
emgStack = (double*)calloc(WIN_MAV * numChannels, sizeof(double));
// lookup table for normal distribution
tableSize = 10000;
discretizeStep = 1000;
normalTable = (double*)calloc(tableSize, sizeof(double));
constructLookup();
// distribution = max number of patterns
pdist_prior = (double*)calloc(XRES * numChannels * M_MAX, sizeof(double));
pdist_post = (double*)calloc(XRES * numChannels * M_MAX, sizeof(double));
pdist_lik = (double*)calloc(XRES * numChannels * M_MAX, sizeof(double));
dp = (double*)calloc(XRES * numChannels * M_MAX, sizeof(double));
ddp = (double*)calloc(XRES * numChannels * M_MAX, sizeof(double));
p_shift = (double*)calloc(XRES * numChannels * M_MAX, sizeof(double));
dp_shift = (double*)calloc(XRES * numChannels * M_MAX, sizeof(double));
p_lik = (double*)calloc(M_MAX, sizeof(double));
// Unregistered pattern slots are poisoned with NaN; work buffers zeroed.
for (int m = 0; m < M_MAX; m++) {
p_lik[m] = NAN;
for (int n = 0; n < numChannels; n++) {
for (int k = 0; k < XRES; k++) {
pdist_prior[k + n * XRES + m * XRES * numChannels] = NAN;
pdist_post[k + n * XRES + m * XRES * numChannels] = NAN;
pdist_lik[k + n * XRES + m * XRES * numChannels] = NAN;
dp[k + n * XRES + m * XRES * numChannels] = 0;
ddp[k + n * XRES + m * XRES * numChannels] = 0;
p_shift[k + n * XRES + m * XRES * numChannels] = (1 / (double)XRES);
dp_shift[k + n * XRES + m * XRES * numChannels] = 0;
}
}
}
// model parameter
xmax = 5.00e-5;
alpha = 1.00e-10;
beta = 1.00e-50;
p_star = -20;
reghold = 512;
setVal(mu_init, 3.00e-2);
setVal(sig2_init, 1.00e-1);
setVal(sig2_reg, 1.00e-2);
setVal(sig2_update, 1.00e+2);
setZero(emgMAV);
// initialize first pattern
for (int n = 0; n < numChannels; n++) {
for (int k = 0; k < XRES; k++) {
// assign uniform distribution
pdist_prior[k + n * XRES] = (1 / (double)XRES);
pdist_post[k + n * XRES] = (1 / (double)XRES);
pdist_lik[k + n * XRES] = (1 / (double)XRES);
dp[k + n * XRES] = 0;
ddp[k + n * XRES] = 0;
p_shift[k + n * XRES] = (1 / (double)XRES);
// NOTE(review): dp_shift is set to 1/XRES here but to 0 in the M_MAX
// loop above — confirm which initial value is intended.
dp_shift[k + n * XRES] = (1 / (double)XRES);
}
}
// Device buffers are allocated lazily inside proceedIteration().
_pdist_prior = nullptr;
_pdist_lik = nullptr;
_pdist_post = nullptr;
_p_shift = nullptr;
_dp_shift = nullptr;
_dp = nullptr;
_ddp = nullptr;
if (!outputFile.is_open()) outputFile.open("output.csv", ios::out);
}
// Pull one CSV row (one value per channel) from inputFile, push it on top
// of each channel's history stack (newest sample at index 0), and
// recompute the MAV (mean absolute value) over the WIN_MAV most recent
// samples.
void TFcore::getSample() {
    samples++;
    string data;
    string line;
    stringstream parse;
    getline(inputFile, line, '\n');
    parse.str(line);
    for (int n = 0; n < numChannels; n++) {
        // BUG FIX: source and destination overlap (a shift-by-one within
        // the same buffer), which is undefined behavior for memcpy;
        // memmove is the overlap-safe equivalent.
        memmove(&emgStack[1 + n * WIN_MAV], &emgStack[0 + n * WIN_MAV], (WIN_MAV - 1) * sizeof(double));
        getline(parse, data, ',');
        emgStack[n * WIN_MAV] = stod(data);
    }
    // MAV per channel: mean of |sample| over the window.
    for (int n = 0; n < numChannels; n++) {
        emgMAV[n] = 0;
        for (int m = 0; m < WIN_MAV; m++) {
            emgMAV[n] += abs(emgStack[m + n * WIN_MAV]) / (double)(WIN_MAV);
        }
    }
    bCollect = true;
    bCompute = false;
}
// One full inference step on the most recent sample:
//   1) normalize the MAV observation to [0,1] and map it to a bin,
//   2) diffuse the priors (GPU or host path),
//   3) compute the likelihood and the posterior (GPU or host path),
//   4) pick the maximum-likelihood pattern (mpred, p_max),
//   5) optionally register a new pattern when nothing explains the sample
//      (p_max < p_star) and the hold-off (reghold) has elapsed.
// Requires a sample collected by getSample() and a warmed-up MAV window
// (samples > 2 * WIN_MAV); otherwise it is a no-op.
void TFcore::proceedIteration() {
if (isCollect() && (samples > 2 * WIN_MAV)) {
bRegister = false;
reglast += 1;
// p_max tracks the best log10-likelihood found across patterns.
p_max = -999;
mpred = 0;
// Normalize the observation and clamp its bin index into [0, XRES-1].
for (int n = 0; n < numChannels; n++) {
xnew[n] = (1 / xmax) * emgMAV[n];
xnew[n] = min((double)1, xnew[n]);
idnew[n] = floor(xnew[n] * (double)(XRES));
if (idnew[n] > (XRES - 1)) idnew[n] = XRES - 1;
}
if (bFuncAdaptation) {
if (bGPU_diffusion) {
// GPU diffusion path; mirrors computeDiffusion() using the
// _computeGradient/_computeWeightedSum kernels.
// NOTE(review): buffers are cudaMalloc'd and cudaFree'd on every
// iteration (costly), and the kernels are launched as a SINGLE block
// of M*XRES*numChannels threads — this exceeds the 1024-thread block
// limit for realistic sizes; confirm bGPU_diffusion is only enabled
// for tiny models.
cudaMalloc(&_pdist_prior, M * XRES * numChannels * sizeof(double));
cudaMalloc(&_p_shift, M * XRES * numChannels * sizeof(double));
cudaMalloc(&_dp_shift, M * XRES * numChannels * sizeof(double));
cudaMalloc(&_dp, M * XRES * numChannels * sizeof(double));
cudaMalloc(&_ddp, M * XRES * numChannels * sizeof(double));
// memcpy pdist_prior => pshift(dist_prior)
for (int m = 0; m < M; m++) {
for (int n = 0; n < numChannels; n++) {
memcpy(&p_shift[1 + n * XRES + m * (XRES * numChannels)],
&pdist_prior[0 + n * XRES + m * (XRES * numChannels)],
(XRES - 1) * sizeof(double));
p_shift[0 + n * XRES + m * (XRES * numChannels)] = p_shift[1 + n * XRES + m * (XRES * numChannels)];
}
}
cudaMemcpy(_pdist_prior, pdist_prior, M * XRES * numChannels * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(_p_shift, p_shift, M * XRES * numChannels * sizeof(double), cudaMemcpyHostToDevice);
_computeGradient << < 1, M* XRES* numChannels >> > (_pdist_prior, _p_shift, _dp, XRES);
cudaMemcpy(dp, _dp, M * XRES * numChannels * sizeof(double), cudaMemcpyDeviceToHost);
// memcpy dp(dist_prior) => dpshift(dist_prior)
for (int m = 0; m < M; m++) {
for (int n = 0; n < numChannels; n++) {
memcpy(&dp_shift[1 + n * XRES + m * (XRES * numChannels)],
&dp[0 + n * XRES + m * (XRES * numChannels)],
(XRES - 1) * sizeof(double));
dp_shift[0 + n * XRES + m * (XRES * numChannels)] = dp_shift[1 + n * XRES + m * (XRES * numChannels)];
}
}
//cudaMemcpy(_dp, dp, M * XRES * numChannels * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(_dp_shift, dp_shift, M * XRES * numChannels * sizeof(double), cudaMemcpyHostToDevice);
_computeGradient << < 1, M* XRES* numChannels >> > (_dp, _dp_shift, _ddp, XRES);
cudaMemcpy(ddp, _ddp, M * XRES * numChannels * sizeof(double), cudaMemcpyDeviceToHost);
_computeWeightedSum << < 1, M* XRES* numChannels >> > (_pdist_prior, _ddp, alpha);
cudaMemcpy(pdist_prior, _pdist_prior, M * XRES * numChannels * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(_pdist_prior);
cudaFree(_p_shift);
cudaFree(_dp_shift);
cudaFree(_dp);
cudaFree(_ddp);
}
else {
// Host diffusion path.
computeDiffusion();
}
normalizePrior();
computeLikelihood(xnew, sig2_update);
//normalizeLikelihood();
if (bGPU_product) {
// GPU posterior path: element-wise prior * likelihood.
cudaMalloc(&_pdist_prior, M * XRES * numChannels * sizeof(double));
cudaMalloc(&_pdist_lik, M * XRES * numChannels * sizeof(double));
cudaMalloc(&_pdist_post, M * XRES * numChannels * sizeof(double));
cudaMemcpy(_pdist_prior, pdist_prior, M * XRES * numChannels * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(_pdist_lik, pdist_lik, M * XRES * numChannels * sizeof(double), cudaMemcpyHostToDevice);
_computeProduct << < M * XRES * numChannels, 1 >> > (_pdist_prior, _pdist_lik, _pdist_post);
cudaMemcpy(pdist_post, _pdist_post, M * XRES * numChannels * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(_pdist_prior);
cudaFree(_pdist_lik);
cudaFree(_pdist_post);
}
else {
computeProduct();
}
normalizePost();
// get maximum likelihood probability
for (int m = 0; m < M; m++) {
p_lik[m] = 0;
// Sum of per-channel log10 posterior mass at the observed bins.
for (int n = 0; n < numChannels; n++) {
p_lik[m] += log10(pdist_post[idnew[n] + n * XRES + m * numChannels * XRES]);
}
//if (p_lik[m] < 1e-200) p_lik[m] = 1e-200;
// MLE prediction
if (p_lik[m] > p_max) {
p_max = p_lik[m];
mpred = m;
}
}
}
// registration
if (bFuncRegistration) {
// NOTE(review): there is no check that M < M_MAX before writing slot M;
// confirm an upper bound is enforced elsewhere.
if ((p_max < p_star) && (reglast > reghold) && (samples > 2 * WIN_MAV)) {
registerPattern(xnew, sig2_reg);
p_lik[M] = 0;
for (int n = 0; n < numChannels; n++) {
p_lik[M] += log10(pdist_post[idnew[n] + n * XRES + M * numChannels * XRES]);
}
p_max = p_lik[M];
mpred = M;
M += 1;
reglast = 0;
bRegister = true;
}
// The winning pattern's posterior becomes its next prior.
memcpy(&pdist_prior[mpred * XRES * numChannels], &pdist_post[mpred * XRES * numChannels], XRES * numChannels * sizeof(double));
}
bCollect = false;
bCompute = true;
}
else {
return;
}
}
// Append one CSV row to the output log: best log10-likelihood, predicted
// pattern index, then the normalized observation per channel.
void TFcore::writeResult() {
outputFile << p_max << ',' << mpred << ',';
for (int n = 0; n < numChannels; n++) outputFile << xnew[n] << ',';
outputFile << endl;
}
// Serialize the model to a binary file in a fixed field order that
// importModel() mirrors exactly: scalar configuration, lookup table,
// per-channel variances, then interleaved prior/posterior values for each
// registered pattern.
void TFcore::exportModel(fs::path filename) {
fstream file;
file.open(filename, ios::out | ios::binary);
file.write(reinterpret_cast<char*>(&numChannels), sizeof(int));
file.write(reinterpret_cast<char*>(&M_MAX), sizeof(int));
file.write(reinterpret_cast<char*>(&xmax), sizeof(double));
file.write(reinterpret_cast<char*>(&XRES), sizeof(int));
file.write(reinterpret_cast<char*>(&WIN_MAV), sizeof(int));
file.write(reinterpret_cast<char*>(&alpha), sizeof(double));
file.write(reinterpret_cast<char*>(&beta), sizeof(double));
file.write(reinterpret_cast<char*>(&p_star), sizeof(double));
file.write(reinterpret_cast<char*>(&M), sizeof(int));
file.write(reinterpret_cast<char*>(&reghold), sizeof(int));
file.write(reinterpret_cast<char*>(&tableSize), sizeof(int));
file.write(reinterpret_cast<char*>(&discretizeStep), sizeof(int));
for (int k = 0; k < tableSize; k++) {
file.write(reinterpret_cast<char*>(&normalTable[k]), sizeof(double));
}
for (int n = 0; n < numChannels; n++) {
file.write(reinterpret_cast<char*>(&sig2_reg[n]), sizeof(double));
file.write(reinterpret_cast<char*>(&sig2_update[n]), sizeof(double));
}
// Prior and posterior are interleaved per bin, pattern-major.
for (int m = 0; m < M; m++) {
for (int n = 0; n < numChannels; n++) {
for (int k = 0; k < XRES; k++) {
file.write(reinterpret_cast<char*>(&pdist_prior[k + n * XRES + m * XRES * numChannels]), sizeof(double));
file.write(reinterpret_cast<char*>(&pdist_post[k + n * XRES + m * XRES * numChannels]), sizeof(double));
}
}
}
file.close();
}
// Load model state previously written by exportModel() (same field order),
// echoing each scalar to stdout.
// NOTE(review): sizes (numChannels, M_MAX, XRES, tableSize, ...) are read
// from the file but the destination buffers are NOT (re)allocated here —
// normalTable, sig2_*, pdist_* must already be allocated (via initModel())
// with sizes at least as large as the file's, otherwise the reads below
// overrun or dereference null. Confirm callers guarantee this.
void TFcore::importModel(fs::path filename) {
fstream file;
file.open(filename, ios::in | ios::binary);
file.read(reinterpret_cast<char*>(&numChannels), sizeof(int));
file.read(reinterpret_cast<char*>(&M_MAX), sizeof(int));
file.read(reinterpret_cast<char*>(&xmax), sizeof(double));
file.read(reinterpret_cast<char*>(&XRES), sizeof(int));
file.read(reinterpret_cast<char*>(&WIN_MAV), sizeof(int));
cout << "numChannels: " << numChannels << endl;
cout << "M_MAX: " << M_MAX << endl;
cout << "xmax: " << xmax << endl;
cout << "XRES: " << XRES << endl;
cout << "WIN_MAV: " << WIN_MAV << endl;
file.read(reinterpret_cast<char*>(&alpha), sizeof(double));
file.read(reinterpret_cast<char*>(&beta), sizeof(double));
file.read(reinterpret_cast<char*>(&p_star), sizeof(double));
cout << "alpha: " << alpha << endl;
cout << "beta: " << beta << endl;
cout << "p_star: " << p_star << endl;
file.read(reinterpret_cast<char*>(&M), sizeof(int));
file.read(reinterpret_cast<char*>(&reghold), sizeof(int));
cout << "M: " << M << endl;
cout << "reghold: " << reghold << endl;
file.read(reinterpret_cast<char*>(&tableSize), sizeof(int));
file.read(reinterpret_cast<char*>(&discretizeStep), sizeof(int));
cout << "tableSize: " << tableSize << endl;
cout << "discretizeStep: " << discretizeStep << endl;
for (int k = 0; k < tableSize; k++) {
file.read(reinterpret_cast<char*>(&normalTable[k]), sizeof(double));
}
cout << "normalTable" << endl;
for (int n = 0; n < numChannels; n++) {
file.read(reinterpret_cast<char*>(&sig2_reg[n]), sizeof(double));
file.read(reinterpret_cast<char*>(&sig2_update[n]), sizeof(double));
}
cout << "sig2" << endl;
// Interleaved prior/posterior pairs, matching exportModel()'s order.
for (int m = 0; m < M; m++) {
for (int n = 0; n < numChannels; n++) {
for (int k = 0; k < XRES; k++) {
file.read(reinterpret_cast<char*>(&pdist_prior[k + n * XRES + m * XRES * numChannels]), sizeof(double));
file.read(reinterpret_cast<char*>(&pdist_post[k + n * XRES + m * XRES * numChannels]), sizeof(double));
}
}
}
cout << "pdist" << endl;
file.close();
}
// Host-side diffusion step (used when bGPU_diffusion is false): one
// explicit update p += alpha * (second finite difference of p) for every
// (pattern, channel) distribution, built from one-bin right shifts.
void TFcore::computeDiffusion() {
    const int chan = XRES * numChannels;
    // Shift each distribution one bin to the right, replicating the first
    // bin so the shifted copy has the same length.
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            const int base = n * XRES + m * chan;
            memcpy(&p_shift[base + 1], &pdist_prior[base], (XRES - 1) * sizeof(double));
            p_shift[base] = p_shift[base + 1];
        }
    }
    // First finite difference: dp = XRES * (p - p_shift).
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            for (int k = 0; k < XRES; k++) {
                const int i = k + n * XRES + m * chan;
                dp[i] = (double)XRES * (pdist_prior[i] - p_shift[i]);
            }
        }
    }
    // Shift dp the same way.
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            const int base = n * XRES + m * chan;
            memcpy(&dp_shift[base + 1], &dp[base], (XRES - 1) * sizeof(double));
            dp_shift[base] = dp_shift[base + 1];
        }
    }
    // Second finite difference: ddp = XRES * (dp - dp_shift).
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            for (int k = 0; k < XRES; k++) {
                const int i = k + n * XRES + m * chan;
                ddp[i] = (double)XRES * (dp[i] - dp_shift[i]);
            }
        }
    }
    // Weighted update of the prior.
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            for (int k = 0; k < XRES; k++) {
                const int i = k + n * XRES + m * chan;
                pdist_prior[i] = pdist_prior[i] + alpha * ddp[i];
            }
        }
    }
}
// Host-side element-wise Bayes update (used when bGPU_product is false):
// posterior = prior * likelihood for every bin of every active pattern.
// The three nested index ranges cover the contiguous span
// [0, M * XRES * numChannels), so a single flat loop is equivalent.
void TFcore::computeProduct() {
    const int total = M * XRES * numChannels;
    for (int i = 0; i < total; i++) {
        pdist_post[i] = pdist_prior[i] * pdist_lik[i];
    }
}
// Open a data file by name. Only ".csv" is handled (rows = samples,
// columns = channels): the column count of the first line sizes the model,
// the line count sets the number of samples, and the stream is rewound so
// getSample() can consume it row by row. The commented-out branch is a
// disabled MATLAB .mat loader.
void TFcore::loadData(string filename) {
string extension = filename.substr(filename.find_last_of(".") + 1);
//if (!strcmp(extension.c_str(), "mat")) {
// MATFile* pmat = matOpen(filename.c_str(), "r");
// if (pmat == NULL) {
// return;
// }
// mxArray* mxdata = matGetVariable(pmat, "emg");
// int m, n;
// m = mxGetM(mxdata);
// n = mxGetN(mxdata);
// double* data = mxGetPr(mxdata);
// for (int i = 0; i < m * n; i++) {
// dataStack.push_back(data[i]);
// }
// initModel(n);
// setNumSamples(m);
// mxDestroyArray(mxdata);
// matClose(pmat);
//}
if (!strcmp(extension.c_str(), "csv")) {
// comma separated file (samples x channels)
inputFile.open(filename.c_str(), ios::in);
int m = 0;
int n = 0;
string data;
string line;
stringstream parse;
// Count columns from the first line, then count the remaining lines.
getline(inputFile, line, '\n'); m++;
parse.str(line);
while (getline(parse, data, ',')) n++;
while (getline(inputFile, line, '\n')) m++;
// Rewind so getSample() starts at the first row.
inputFile.clear();
inputFile.seekg(0, ios::beg);
initModel(n);
setNumSamples(m);
}
}
// Open a CSV data file (rows = samples, columns = channels): count the
// columns of the first line and the total number of lines, rewind the
// stream, then size the model accordingly. Rows are consumed later, one
// at a time, by getSample(). Non-CSV paths are ignored.
void TFcore::loadData(fs::path filename) {
    if (filename.extension() != ".csv") return;
    inputFile.open(filename, ios::in);
    string line;
    string field;
    int rows = 0;
    int cols = 0;
    getline(inputFile, line, '\n');
    rows++;
    stringstream header(line);
    while (getline(header, field, ',')) cols++;
    while (getline(inputFile, line, '\n')) rows++;
    // Rewind so getSample() starts at the first row.
    inputFile.clear();
    inputFile.seekg(0, ios::beg);
    initModel(cols);
    setNumSamples(rows);
}
// Initialize the model for `ch` channels and buffer the raw samples in
// dataStack (data is row-major: samples x channels).
void TFcore::loadData(vector<double> data, int ch) {
    initModel(ch);
    setNumSamples(data.size() / ch);
    for (double v : data) {
        dataStack.push_back(v);
    }
}
// Build the likelihood for the observation `mu` with variance `sig2`:
// for each channel, a discretized normal density centered on mu[n] is
// looked up in the precomputed table, normalized to sum to 1, and then
// replicated to every active pattern (all patterns share the likelihood
// of the current sample).
void TFcore::computeLikelihood(double* mu, double* sig2) {
    double* Z = (double*)calloc(XRES * numChannels, sizeof(double));
    for (int n = 0; n < numChannels; n++) {
        for (int k = 0; k < XRES; k++) {
            // Standardized distance of the bin center from mu, scaled to
            // the lookup table's resolution.
            Z[k + n * XRES] = abs(((k + 1) / (double)XRES) - mu[n]);
            Z[k + n * XRES] *= discretizeStep / sqrt(sig2[n]);
            int id = floor(abs(Z[k + n * XRES]));
            // Past the table's reach the density is effectively zero.
            pdist_lik[k + n * XRES] = (id < tableSize) ? normalTable[id] : EPSILON;
        }
    }
    // Normalize each channel's likelihood into a proper distribution.
    for (int n = 0; n < numChannels; n++) {
        double psum = 0;
        for (int k = 0; k < XRES; k++) psum += pdist_lik[k + n * XRES];
        for (int k = 0; k < XRES; k++) pdist_lik[k + n * XRES] /= psum;
    }
    // Replicate pattern 0's likelihood to the other active patterns.
    for (int m = 1; m < M; m++) {
        memcpy(&pdist_lik[m * XRES * numChannels], &pdist_lik[0], XRES * numChannels * sizeof(double));
    }
    free(Z);
}
// Write a brand-new pattern distribution into slot M (the next free slot):
// a normalized, discretized normal centered on the current observation.
// The caller (proceedIteration) increments M afterwards.
void TFcore::registerPattern(double* mu, double* sig2) {
    double* Z = (double*)calloc(XRES * numChannels, sizeof(double));
    const int base = M * XRES * numChannels;
    for (int n = 0; n < numChannels; n++) {
        for (int k = 0; k < XRES; k++) {
            Z[k + n * XRES] = abs(((k + 1) / (double)XRES) - mu[n]);
            Z[k + n * XRES] *= discretizeStep / sqrt(sig2[n]);
            int id = floor(abs(Z[k + n * XRES]));
            pdist_post[base + k + n * XRES] = (id < tableSize) ? normalTable[id] : EPSILON;
        }
    }
    // Normalize each channel of the new pattern.
    for (int n = 0; n < numChannels; n++) {
        double psum = 0;
        for (int k = 0; k < XRES; k++) psum += pdist_post[base + k + n * XRES];
        for (int k = 0; k < XRES; k++) pdist_post[base + k + n * XRES] /= psum;
    }
    free(Z);
}
// Normalize every (pattern, channel) slice of pdist_lik to sum to 1.
void TFcore::normalizeLikelihood() {
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            double* slice = &pdist_lik[n * XRES + m * XRES * numChannels];
            double total = 0;
            for (int k = 0; k < XRES; k++) total += slice[k];
            for (int k = 0; k < XRES; k++) slice[k] /= total;
        }
    }
}
// Normalize every (pattern, channel) slice of pdist_prior to sum to 1.
void TFcore::normalizePrior() {
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            double* slice = &pdist_prior[n * XRES + m * XRES * numChannels];
            double total = 0;
            for (int k = 0; k < XRES; k++) total += slice[k];
            for (int k = 0; k < XRES; k++) slice[k] /= total;
        }
    }
}
// Normalize every (pattern, channel) slice of pdist_post to sum to 1.
void TFcore::normalizePost() {
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < numChannels; n++) {
            double* slice = &pdist_post[n * XRES + m * XRES * numChannels];
            double total = 0;
            for (int k = 0; k < XRES; k++) total += slice[k];
            for (int k = 0; k < XRES; k++) slice[k] /= total;
        }
    }
}
// Precompute the standard normal pdf at tableSize points spaced
// 1/discretizeStep apart, so likelihood evaluation becomes a table lookup.
void TFcore::constructLookup() {
    const double norm = 1 / sqrt(2 * M_PI);
    for (int i = 0; i < tableSize; i++) {
        const double z = (double)i / discretizeStep;
        normalTable[i] = norm * exp(-0.5 * z * z);
    }
}
// Fill a per-channel vector (length numChannels) with a constant value.
void TFcore::setVal(double* vector, double val) {
    for (int k = 0; k < numChannels; k++) vector[k] = val;
}
// Zero out a per-channel vector.
void TFcore::setZero(double* vector) {
    setVal(vector, 0.0);
}
// Fill a per-channel vector with ones.
void TFcore::setOnes(double* vector) {
    setVal(vector, 1.0);
}
|
8,244 | #include <stdio.h>
#include <cuda_runtime.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} \
void initialData(float* ip, int size);
double cpuSecond();
void sumArraysOnHost(float* A, float* B, float* C, int N);
__global__ void sumArraysOnDevice(float* A, float* B, float* C, const int N);
void checkResult(float* hostResult, float* deviceResult, const int N);
// Vector addition benchmark: fills two host arrays, sums them on the CPU
// and the GPU, times both, and verifies the results agree.
int main(int argc, char** argv)
{
    printf("%s Starting....\n", argv[0]);
    // Select and report the device.
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    int nElem = 1 << 24;
    printf("Vector size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);
    float *hA, *hB, *hostResult, *gpuResult;
    hA = (float*)malloc(nBytes);
    hB = (float*)malloc(nBytes);
    hostResult = (float*)malloc(nBytes);
    gpuResult = (float*)malloc(nBytes);
    double iStart, iEnd, iElaps;
    iStart = cpuSecond();
    initialData(hA, nElem);
    initialData(hB, nElem);
    iElaps = cpuSecond() - iStart;
    // FIX: this timing was computed but never reported.
    printf("initialData Time elapsed %fsec\n", iElaps);
    memset(hostResult, 0, nBytes);
    memset(gpuResult, 0, nBytes);
    iStart = cpuSecond();
    sumArraysOnHost(hA, hB, hostResult, nElem);
    iElaps = cpuSecond() - iStart;
    printf("SumArrayOnCPU Time elapsed %fsec\n", iElaps);
    float *dA, *dB, *dC;
    // FIX: the CHECK macro exists in this file but was not applied to the
    // allocation/copy calls; an earlier failure would have gone unnoticed.
    CHECK(cudaMalloc((float**)&dA, nBytes));
    CHECK(cudaMalloc((float**)&dB, nBytes));
    CHECK(cudaMalloc((float**)&dC, nBytes));
    CHECK(cudaMemcpy(dA, hA, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dB, hB, nBytes, cudaMemcpyHostToDevice));
    int iLen = 1024;
    dim3 block(iLen);
    dim3 grid((nElem + block.x - 1) / block.x);
    iStart = cpuSecond();
    printf("Start: %f\n", iStart);
    sumArraysOnDevice<<<grid, block>>>(dA, dB, dC, nElem);
    // Launch-configuration errors only surface via cudaGetLastError();
    // in-kernel faults surface at the synchronize.
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    iEnd = cpuSecond();
    printf("End: %f\n", iEnd);
    iElaps = iEnd - iStart;
    printf("SumArrayOnGPU <<<%d, %d>>> Time elapsed %fsec\n", grid.x, block.x, iElaps);
    CHECK(cudaMemcpy(gpuResult, dC, nBytes, cudaMemcpyDeviceToHost));
    checkResult(hostResult, gpuResult, nElem);
    CHECK(cudaFree(dA));
    CHECK(cudaFree(dB));
    CHECK(cudaFree(dC));
    free(hA);
    free(hB);
    free(hostResult);
    free(gpuResult);
    return(0);
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    // BUG FIX: the fractional part must come from tv_usec; the original
    // added tv_sec * 1e-6 (seconds again), skewing every reported timing
    // by roughly a thousand seconds.
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// Fill ip with `size` pseudo-random floats in [0, 25.5].
// BUG FIX: the RNG is now seeded only once per process. The original
// called srand(time(...)) on every invocation, so two calls within the
// same second (as main does for hA and hB) produced IDENTICAL arrays.
void initialData(float* ip, int size)
{
    static int seeded = 0;
    if (!seeded)
    {
        time_t t;
        srand((unsigned int) time(&t));
        seeded = 1;
    }
    for(int i=0;i<size;++i)
    {
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}
// CPU reference: element-wise sum C[i] = A[i] + B[i] for i in [0, N).
void sumArraysOnHost(float* A, float* B, float* C, int N)
{
    int i = 0;
    while (i < N)
    {
        C[i] = A[i] + B[i];
        ++i;
    }
}
// GPU element-wise sum, one thread per element; threads whose global
// index falls past N exit via the bounds guard.
__global__ void sumArraysOnDevice(float* A, float* B, float* C, const int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// Compare host and device results element-wise within `epsilon` and
// report the first mismatch (or a success message).
// BUG FIX: the original passed a float difference through abs(); if the
// int overload from <stdlib.h> is selected, the difference is truncated
// toward zero and any mismatch smaller than 1.0 is silently accepted.
// An explicit double magnitude avoids the overload ambiguity entirely.
void checkResult(float* hostResult, float* deviceResult, const int N)
{
    double epsilon = 1.0E-8;
    int match = 1;
    for(int i =0;i<N;++i)
    {
        double d = (double)hostResult[i] - (double)deviceResult[i];
        if (d < 0) d = -d;
        if (d > epsilon)
        {
            match = 0;
            printf("Array do not match\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostResult[i], deviceResult[i], i);
            break;
        }
    }
    if(match)
        printf("Array match\n");
    return;
}
8,245 | #include "includes.h"
// Unpack an array of 64-bit records `c`: the low 32 bits hold a
// sign-magnitude-encoded value that is converted to two's complement and
// stored (bit-reinterpreted) into a[i]; the high 32 bits go to b[i].
// NOTE(review): `a[i] = *((double *)&vi)` reinterprets 8 bytes at the
// address of the 4-byte local `vi` — this reads past the object and is
// undefined behavior. It looks like either `vi` should be a 64-bit
// temporary or `a` a 32-bit view; confirm the intended record layout
// before changing anything.
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
// Grid-stride loop over a 2D (x*y) grid of 1D blocks.
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
// Sign-magnitude -> two's complement for the low word.
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
}
8,246 | //Note:
//Cara running program ./nama_file mode besar_matrix besar_grid besar_block
//Ukuran matrix: besar_matrix x besar matrix
//Grid: besar_grid x besar_grid (block per grid) | Max: Mengacu pada NVIDIA Compute Capability dari setiap seri GPU
//Block: besar_block x besar_block (thread per block) | Max: Mengacu pada NVIDIA Compute Capability dari setiap seri GPU
// Mode:
// 0: Matrix multiplication pada 1 GPU tanpa melihat hasil sekuensial
// 1: Matrix multiplication pada 1 GPU dengan hasil sekuensial
// 2: Matrix multiplication pada multiple GPU tanpa melihat hasil sekuensial
// 3: Matrix multiplication pada multiple GPU dengan hasil sekuensial
// mode 2 ketas belum selesai dikerjakan
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
//Operasi perkalian matrix pada gpu
// Square matrix multiply on the GPU. When matrix_size exceeds the total
// thread extent (grid * block per dimension), each thread computes a
// `displacement` x `displacement` tile of the result instead of a single
// element. NOTE(review): coverage is only complete when matrix_size is
// divisible by grid*block (integer division of `displacement`) — confirm
// callers enforce this.
__global__ void matrixmul_kernel(int *gpu_matrixA, int *gpu_matrixB, int *gpu_result, int matrix_size, int grid, int block){
int l, m, n, R, displacement;
// Elements per thread per dimension (1 when the grid covers the matrix).
if(matrix_size > (grid * block)) displacement = matrix_size/(grid * block);
else displacement = 1;
int row_index = blockIdx.y * blockDim.y + threadIdx.y;
int col_index = blockIdx.x * blockDim.x + threadIdx.x;
if(row_index < matrix_size && col_index < matrix_size){
// Walk this thread's tile of the output.
for(m = 0; m < displacement; m++){
for(n = 0; n < displacement; n++){
R = 0;
// Dot product of row (row_index*displacement+m) of A with
// column (col_index*displacement+n) of B.
for(l = 0; l < matrix_size; l++){
int A = gpu_matrixA[(row_index * displacement + m) * matrix_size + l];
int B = gpu_matrixB[l * matrix_size + (col_index * displacement + n)];
R += A * B;
}
gpu_result[(row_index * displacement + m) * matrix_size + (col_index * displacement + n)] = R;
}
}
}
}
// Driver: multiplies two random matrix_size x matrix_size matrices on one
// GPU, times the whole transfer+compute, and (modes 1/3) verifies the
// result against a sequential CPU multiply. See the usage notes at the
// top of the file.
int main(int argc, char** argv){
    srand(time(NULL));
    double runtime;
    struct timespec begin, end;
    // FIX: validate the argument count before dereferencing argv
    // (the original read argv[1..4] unconditionally).
    if(argc != 5){
        fprintf(stderr, "Usage: %s mode matrix_size grid block\n", argv[0]);
        return 1;
    }
    int mode = atoi(argv[1]);
    int matrix_size = atoi(argv[2]);
    int igrid = atoi(argv[3]);
    int iblock = atoi(argv[4]);
    // Host allocation and random initialization.
    int matrixBytes = (matrix_size * matrix_size) * sizeof(int);
    int *matrixA = (int *)malloc(matrixBytes);
    int *matrixB = (int *)malloc(matrixBytes);
    int *result = (int *)malloc(matrixBytes);
    int i, j, k;
    for(i = 0; i < matrix_size * matrix_size; i++){
        matrixA[i] = rand() % 99 + 1;
        matrixB[i] = rand() % 99 + 1;
    }
    // Single-GPU path (multi-GPU modes 2/3 were never finished).
    clock_gettime(CLOCK_REALTIME, &begin);
    int *gpu_matrixA, *gpu_matrixB, *gpu_result;
    cudaMalloc((void **) &gpu_matrixA, matrixBytes);
    cudaMalloc((void **) &gpu_matrixB, matrixBytes);
    cudaMalloc((void **) &gpu_result, matrixBytes);
    cudaMemcpy(gpu_matrixA, matrixA, matrixBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_matrixB, matrixB, matrixBytes, cudaMemcpyHostToDevice);
    dim3 grid(igrid, igrid);
    dim3 block(iblock, iblock);
    matrixmul_kernel<<<grid, block>>>(gpu_matrixA, gpu_matrixB, gpu_result, matrix_size, igrid, iblock);
    // The blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(result, gpu_result, matrixBytes, cudaMemcpyDeviceToHost);
    clock_gettime(CLOCK_REALTIME, &end);
    runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
    printf("Running Time: %f\n\n", runtime);
    // Sequential verification (modes 1 and 3 only).
    if(mode == 1 || mode == 3){
        int right_answer = 0;
        int *seqresult = (int *)malloc(matrixBytes);
        for (i = 0; i < matrix_size; i++){
            for (j = 0; j < matrix_size; j++){
                seqresult[i * matrix_size + j] = 0;
                for (k = 0; k < matrix_size; k++)
                    seqresult[i * matrix_size + j] += matrixA[i * matrix_size + k] * matrixB[k * matrix_size + j];
                if(seqresult[i * matrix_size + j] == result[i * matrix_size + j]) right_answer += 1;
            }
        }
        if(right_answer == (matrix_size * matrix_size)) printf("The answer is matched.\n");
        free(seqresult);
    }
    // BUG FIX: gpu_matrixB was freed twice and gpu_matrixA never freed.
    cudaFree(gpu_matrixA);
    cudaFree(gpu_matrixB);
    cudaFree(gpu_result);
    // Release host memory.
    free(matrixA);
    free(matrixB);
    free(result);
    return 0;
}
|
8,247 | //pass
//--blockDim=32 --gridDim=2
// Verification micro-kernel: each thread issues a device-wide memory fence
// and performs no other work (the header comments above give the GPUVerify
// launch configuration: --blockDim=32 --gridDim=2).
__global__ void foo() {
__threadfence();
}
|
8,248 | #include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Return the current wall-clock time in seconds (microsecond resolution),
// used for coarse host-side timing.
//
// Fixes over the original: the unused `int i` result variable is gone, and
// the obsolete timezone argument of gettimeofday() is passed as NULL as
// POSIX recommends.
inline double seconds()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// Fill `input` with n pseudo-random non-negative floats (rand() scaled by 10).
void intialData(float *input, int n)
{
    float *const end = input + n;
    for (float *p = input; p != end; ++p)
        *p = (float)rand() / 10.f;
}
// Host reference transpose: `in` is an ny-row by nx-column row-major matrix;
// `out` receives its transpose, stored with ny as the row length
// (out[i*ny + j] = in[j*nx + i]).
void transposeHost(float *out, const float *in , const int nx, const int ny)
{
    for (int row = 0; row < ny; ++row)
        for (int col = 0; col < nx; ++col)
            out[col * ny + row] = in[row * nx + col];
}
// Identity copy launched once before timing to absorb GPU startup overhead.
// Expects a 2D grid of 2D blocks covering an nx-by-ny element matrix.
__global__ void warmup(float *in, float *out, const int nx, const int ny)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < nx && row < ny)
        out[row * nx + col] = in[row * nx + col];
}
// Identity copy with row-major addressing: consecutive threads in x touch
// consecutive elements, giving the bandwidth upper bound for the benchmark.
__global__ void copyGlobalRow(float *in, float *out, const int nx, const int ny)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= nx || row >= ny)
        return;
    out[row * nx + col] = in[row * nx + col];
}
// Identity copy addressed as i*ny + j: consecutive threads in x step by ny,
// exercising strided global accesses (the "column" variant of the benchmark).
// NOTE(review): the parameter order here is (out, in) — reversed from
// copyGlobalRow's (in, out). main() passes (a_d, c_d) to both, so this
// kernel copies c_d back into a_d; harmless only because c_d already holds
// a copy of a_d at that point. Confirm the intended direction.
__global__ void copyGlobalCol(float *out, float *in, const int nx, const int ny)
{
unsigned int i = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int j = threadIdx.y+blockDim.y*blockIdx.y;
if (i<nx && j<ny)
{
out[i*ny+j] = in[i*ny+j];
}
}
// Transpose with contiguous reads and strided writes:
// out[i*ny + j] = in[j*nx + i].
__global__ void transposeGlobalRow(float *in, float *out, const int nx, const int ny)
{
    const unsigned int ix = threadIdx.x + blockDim.x * blockIdx.x;
    const unsigned int iy = threadIdx.y + blockDim.y * blockIdx.y;
    if (ix >= nx || iy >= ny)
        return;
    out[ix * ny + iy] = in[iy * nx + ix];
}
// Transpose with strided reads and contiguous writes:
// out[j*nx + i] = in[i*ny + j].
__global__ void transposeGlobalCol(float *in, float *out, const int nx, const int ny)
{
    const unsigned int ix = threadIdx.x + blockDim.x * blockIdx.x;
    const unsigned int iy = threadIdx.y + blockDim.y * blockIdx.y;
    if (ix >= nx || iy >= ny)
        return;
    out[iy * nx + ix] = in[ix * ny + iy];
}
// Compare a GPU result against the host reference, element by element,
// printing "Error" and stopping at the first mismatch.
//
// Fix: the original `break` only left the inner loop, so one bad element
// still scanned (and re-reported) every remaining row; the function now
// returns immediately on the first mismatch. Exact float equality is
// intentional here: the kernels only move values, never recompute them.
void verify(const float *host, const float *gpu , const int nx, const int ny)
{
    for (int j = 0; j < ny; j++)
    {
        for (int i = 0; i < nx; i++)
        {
            if (host[j*nx + i] != gpu[j*nx + i]) {
                printf("Error\n");
                return;
            }
        }
    }
}
// Benchmark driver: times identity-copy and transpose kernels over a
// 2048 x 2048 float matrix and reports effective bandwidth, verifying the
// transpose results against a host reference.
// NOTE(review): bw is computed from the total elapsed ms over all nRep
// repetitions and then divided by nRep again at print time, so the printed
// GB/s is only meaningful for nRep == 1 — confirm the intended formula.
// NOTE(review): malloc/cudaMalloc results are unchecked and the CUDA events
// are never destroyed.
int main(){
    int nx = 1 <<11;
    int ny = 1 <<11;
    size_t nBytes = (nx*ny)*sizeof(float);
    int nRep = 1;
    float ms = 0;
    float bw = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 block (16, 16);
    dim3 grid ((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    float *a_h = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);
    printf("Matrix %d nx %d ny\n", nx,ny);
    intialData(a_h, nx*ny);
    transposeHost(hostRef, a_h, nx, ny);
    //allocate device memory
    float *a_d, *c_d;
    cudaMalloc((float **)&a_d,nBytes);
    cudaMalloc((float **)&c_d,nBytes);
    //copy data
    cudaMemcpy(a_d,a_h, nBytes, cudaMemcpyHostToDevice);
    warmup<<<grid,block>>>(a_d,c_d,nx,ny);
    //copyGlobalRow: contiguous copy, bandwidth upper bound
    cudaEventRecord(start,0);
    for (int k=0;k<nRep;k++)
    {
        copyGlobalRow<<<grid,block>>>(a_d,c_d,nx,ny);
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    bw = 2 * nx * ny * sizeof(float)/ms/1e6;   // read + write bytes over total ms
    printf("copyGlobalRow: %f ms, effective bandwidth %f GB/s\n",ms/((float)nRep),bw/((float)nRep));
    //copyGlobalCol: strided copy (note its reversed (out, in) parameters)
    cudaEventRecord(start);
    for (int k=0;k<nRep;k++)
    {
        copyGlobalCol<<<grid,block>>>(a_d,c_d,nx,ny);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    bw = 2 * nx * ny * sizeof(float)/ms/1e6;
    printf("copyGlobalCol: %f ms, effective bandwidth %f GB/s\n",ms/((float)nRep),bw/((float)nRep));
    //transposeGlobalRow: result checked against the host transpose below
    cudaEventRecord(start,0);
    for (int k=0;k<nRep;k++)
    {
        transposeGlobalRow<<<grid,block>>>(a_d,c_d,nx,ny);
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    bw = 2 * nx * ny * sizeof(float)/ms*1e-6;
    printf("transposeGlobalRow: %f ms, effective bandwidth %f GB/s\n",ms/((float)nRep),bw/((float)nRep));
    cudaMemcpy(gpuRef,c_d, nBytes, cudaMemcpyDeviceToHost);
    verify(hostRef,gpuRef,nx,ny);
    //transposeGlobalCol
    cudaEventRecord(start,0);
    for (int k=0;k<nRep;k++)
    {
        transposeGlobalCol<<<grid,block>>>(a_d,c_d,nx,ny);
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    bw = 2 * nx * ny * sizeof(float)/ms*1e-6;
    printf("transposeGlobalCol: %f ms, effective bandwidth %f GB/s\n",ms/((float)nRep),bw/((float)nRep));
    cudaMemcpy(gpuRef,c_d, nBytes, cudaMemcpyDeviceToHost);
    verify(hostRef,gpuRef,nx,ny);
    cudaFree(a_d);
    cudaFree(c_d);
    free(hostRef);
    free(gpuRef);
    free(a_h);
    cudaDeviceReset();
    return 0;
}
|
8,249 | /* 2013
* Maciej Szeptuch
* II UWr
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <errno.h>
#define WORD_MAXLEN 16
#define STEP_0_THREADS 128
#define STEP_R_THREADS 128
#define __CUDA__
#define __CPU__
// Minimum of two ints, callable from both host and device code.
__device__ __host__ inline int MIN(const int a, const int b) { return (b < a) ? b : a; }
__device__ __host__ int LevenshteinDistance(const char *const A, const char *const B);
char *loadDictionary(const char *const file, int &words, int &size);
void printHead(void);
#ifdef __CUDA__
__global__ void LevenshteinCUDA_STEP_0(const char *const dictionary, const int words, const char *const pattern, int *result);
__global__ void LevenshteinCUDA_STEP_R(const int *from, int *to, const int words);
#endif // __CUDA__
#ifdef __CPU__
int LevenshteinCPU(const char *const dictionary, const int words, const char *const pattern);
#endif // __CPU__
// Entry point: load a fixed-width dictionary, then for every word argument
// find its nearest dictionary word with the GPU and/or CPU Levenshtein
// search and print both answers with timings.
//
// Fixes over the original:
//  - cudaPattern and cudaResult are now freed (only cudaDictionary was);
//  - the per-word CUDA events are destroyed after use;
//  - a stray third argument to a two-conversion printf was removed.
int main(const int argc, const char *const* argv)
{
    if(argc < 3)
    {
        fprintf(stderr, "usage: %s dictionary words...\nError: not enough arguments\n", argv[0]);
        return 1;
    }
    int dictionarySize = 0,
        dictionaryWords = 0;
    char *dictionary = loadDictionary(argv[1], dictionaryWords, dictionarySize);
    if(!dictionary)
    {
        fprintf(stderr, "usage: %s dictionary words...\nError: loading dictionary: %s\n", argv[0], strerror(errno));
        return 2;
    }
#ifdef __CUDA__
    // GPU INIT: round the dictionary buffer up to a power of two so the
    // reduction tree has a regular shape.
    char *cudaDictionary = NULL,
         *cudaPattern = NULL;
    int *cudaResult = NULL;
    int alignedDictionarySize = 1;
    while(alignedDictionarySize < dictionarySize)
        alignedDictionarySize <<= 1;
    cudaMalloc(&cudaDictionary, alignedDictionarySize * sizeof(char));
    cudaMemcpy(cudaDictionary, dictionary, dictionarySize * sizeof(char), cudaMemcpyHostToDevice);
    cudaMalloc(&cudaPattern, WORD_MAXLEN * sizeof(char));
    cudaMalloc(&cudaResult, alignedDictionarySize * 2 * sizeof(int));
#endif // __CUDA__
    printHead();
    for(int a = 2; a < argc; ++ a)
    {
        int result[2] = {1 << 30, 1 << 30};
        char pattern[WORD_MAXLEN + 2] = {};
        memcpy(pattern, argv[a], strlen(argv[a]) * sizeof(char));
        printf(" %-16s | ", pattern);
#ifdef __CUDA__
        {
            // GPU TEST: per-word distance kernel, then a tree reduction that
            // leaves the best (index, distance) pair in cudaResult[0..1].
            cudaEvent_t start, end;
            cudaEventCreate(&start);
            cudaEventCreate(&end);
            cudaEventRecord(start, NULL);
            cudaMemcpy(cudaPattern, pattern, WORD_MAXLEN * sizeof(char), cudaMemcpyHostToDevice);
            LevenshteinCUDA_STEP_0<<<(dictionaryWords + STEP_0_THREADS - 1) / STEP_0_THREADS, STEP_0_THREADS>>> (cudaDictionary, dictionaryWords, cudaPattern, cudaResult);
            for(int size = STEP_R_THREADS; size < dictionaryWords; size <<= 1)
                LevenshteinCUDA_STEP_R<<<(dictionaryWords + size - 1) / size, STEP_R_THREADS>>> (cudaResult, cudaResult, dictionaryWords);
            cudaMemcpy(result, cudaResult, 2 * sizeof(int), cudaMemcpyDeviceToHost);
            cudaEventRecord(end, NULL);
            cudaEventSynchronize(end);
            float gputotal = 0;
            cudaEventElapsedTime(&gputotal, start, end);
            printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], gputotal);
            cudaEventDestroy(start);
            cudaEventDestroy(end);
        }
#endif // __CUDA__
#ifdef __CPU__
        {
            // CPU TEST: brute-force scan for the nearest word.
            timeval start, end;
            gettimeofday(&start, NULL);
            result[0] = LevenshteinCPU(dictionary, dictionaryWords, pattern);
            gettimeofday(&end, NULL);
            float cputotal = (end.tv_sec - start.tv_sec) * 1000.0f + (end.tv_usec - start.tv_usec) / 1000.0f;
            printf("%-16s [%11.6f] | ", dictionary + result[0] * WORD_MAXLEN, cputotal);
        }
#endif // __CPU__
        printf("%d\n", LevenshteinDistance(pattern, dictionary + result[0] * WORD_MAXLEN));
    }
#ifdef __CUDA__
    cudaFree(cudaDictionary);
    cudaFree(cudaPattern);
    cudaFree(cudaResult);
#endif // __CUDA__
    free(dictionary);
    return 0;
}
// Load a dictionary file into a flat array of fixed-width (WORD_MAXLEN)
// slots, one word per slot, NUL-padded. Sets `words` to the line count and
// `size` to words * WORD_MAXLEN. Returns NULL (errno set by fopen) if the
// file cannot be opened, or NULL on allocation failure.
//
// Fixes over the original:
//  - allocates with calloc instead of new[], matching the free() the caller
//    performs (new[]/free is undefined behavior);
//  - reads each line into a local buffer and copies at most WORD_MAXLEN-1
//    characters, so a long line can no longer spill into the next slot;
//  - strips a trailing "\n" or "\r\n" with strcspn instead of blindly
//    chopping the last two characters (which corrupted words in files with
//    LF-only line endings).
char *loadDictionary(const char *const file, int &words, int &size)
{
    FILE *handle = fopen(file, "rb");
    if(!handle)
        return NULL;
    char buffer[64] = {};
    words = 0;
    while(fgets(buffer, sizeof(buffer), handle))
        ++ words;
    fseek(handle, 0, SEEK_SET);
    size = words * WORD_MAXLEN;
    char *dictionary = (char *)calloc(size, sizeof(char));
    if(!dictionary)
    {
        fclose(handle);
        return NULL;
    }
    char *current = dictionary;
    while(fgets(buffer, sizeof(buffer), handle))
    {
        buffer[strcspn(buffer, "\r\n")] = 0;       // strip the line ending
        strncpy(current, buffer, WORD_MAXLEN - 1); // slot stays NUL-terminated
        current += WORD_MAXLEN;
    }
    fclose(handle);
    return dictionary;
}
#ifdef __CPU__
// Linear scan of the fixed-width dictionary: return the index of the word
// with the smallest Levenshtein distance to `pattern` (first wins on ties).
int LevenshteinCPU(const char *const dictionary, const int words, const char *const pattern)
{
    int bestIndex = 0;
    int bestDist = 1 << 30;
    for(int w = 0; w < words; ++ w)
    {
        const int dist = LevenshteinDistance(pattern, dictionary + w * WORD_MAXLEN);
        if(dist < bestDist)
        {
            bestDist = dist;
            bestIndex = w;
        }
    }
    return bestIndex;
}
#endif // __CPU__
// Two-row dynamic-programming Levenshtein distance between NUL-terminated
// strings A and B (edit operations: insert, delete, replace). Runs on both
// host and device; strings must fit in WORD_MAXLEN characters.
//
// Fix: the original counted lengths as strlen+1 (including the NUL) and
// iterated one column past the end; with a WORD_MAXLEN-character word that
// indexed temp[t][WORD_MAXLEN + 1], one past the end of the row. Counting
// the true lengths keeps every access in bounds — the result is unchanged,
// since the trailing NUL matched on both strings and cost nothing.
__device__ __host__ int LevenshteinDistance(const char *const A, const char *const B)
{
    int sa = 0,
        sb = 0;
    while(A[sa] > 0) ++ sa;
    while(B[sb] > 0) ++ sb;
    int temp[2][WORD_MAXLEN + 1] = {};
    int t = 1;
    for(int b = 0; b <= sb; ++ b)
        temp[0][b] = b;                       // distance from the empty prefix
    for(int a = 1; a <= sa; ++ a, t ^= 1)
    {
        temp[t][0] = a;
        for(int b = 1; b <= sb; ++ b)
            temp[t][b] = MIN(temp[t ^ 1][ b ] + 1,            // deletion
                         MIN(temp[ t ][b - 1] + 1,            // insertion
                             temp[t ^ 1][b - 1] + (A[a-1] != B[b-1]))); // replace
    }
    return temp[t^1][sb];
}
// Print the results-table header; the gpu/cpu columns appear only when the
// corresponding backend macro (__CUDA__ / __CPU__) is compiled in.
void printHead(void)
{
printf(" word | ");
#ifdef __CUDA__
printf(" gpu | ");
#endif // __CUDA__
#ifdef __CPU__
printf(" cpu | ");
#endif // __CPU__
printf("distance\n");
printf("------------------|-");
#ifdef __CUDA__
printf("-------------------------------|-");
#endif // __CUDA__
#ifdef __CPU__
printf("-------------------------------|-");
#endif // __CPU__
printf("---------\n");
}
#ifdef __CUDA__
// One thread per dictionary word: write the (index, distance-to-pattern)
// pair into result[2*word] / result[2*word + 1] for the reduction step.
__global__ void LevenshteinCUDA_STEP_0(const char *dictionary, const int words, const char *pattern, int *result)
{
    const int word = blockIdx.x * STEP_0_THREADS + threadIdx.x;
    if(word < words)
    {
        result[word * 2] = word;
        result[word * 2 + 1] = LevenshteinDistance(pattern, dictionary + word * WORD_MAXLEN);
    }
}
// Block-wide min-reduction over the (index, distance) pairs produced by
// STEP_0: each block writes its winning pair to to[2*blockIdx.x .. +1].
// Assumes STEP_R_THREADS is a power of two (it is: 128).
//
// Fixes over the original:
//  - threads whose global element lies past `words` load a sentinel pair
//    instead of reading out of bounds;
//  - a __syncthreads() inside the reduction loop orders each round's writes
//    before the next round's reads (the original raced across warps);
//  - the loop bound no longer mixes the block-local tid with the global
//    element count.
__global__ void LevenshteinCUDA_STEP_R(const int *from, int *to, const int words)
{
    __shared__ int local_data[STEP_R_THREADS * 2];
    const int tid = threadIdx.x;
    const int i = blockIdx.x * blockDim.x + tid;
    if(i < words)
    {
        local_data[tid * 2] = from[i * 2];
        local_data[tid * 2 + 1] = from[i * 2 + 1];
    }
    else
    {
        local_data[tid * 2] = 0;
        local_data[tid * 2 + 1] = 1 << 30;   // sentinel distance: never wins
    }
    __syncthreads();
    for(int s = 1; s < blockDim.x; s <<= 1)
    {
        if(tid % (2 * s) == 0 && local_data[tid * 2 + 1] > local_data[(tid + s) * 2 + 1])
        {
            local_data[tid * 2] = local_data[(tid + s) * 2];
            local_data[tid * 2 + 1] = local_data[(tid + s) * 2 + 1];
        }
        __syncthreads();   // publish this round before the next one reads
    }
    if(tid == 0)
    {
        to[blockIdx.x * 2] = local_data[0];
        to[blockIdx.x * 2 + 1] = local_data[1];
    }
}
#endif // __CUDA__
|
8,250 | #include "includes.h"
// Finalize a compacted index table: entries flagged valid keep their stored
// value minus one; invalid entries are marked with 0xffffffff.
// NOTE(review): compacted_index appears to hold a prefix-scan result on
// entry — confirm against the caller.
__global__ void buildCompactedIndexKernel( const unsigned* valid_indicator, const unsigned table_size, unsigned* compacted_index ) {
    const unsigned idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= table_size)
        return;
    const bool valid = valid_indicator[idx] > 0;
    compacted_index[idx] = valid ? compacted_index[idx] - 1 : 0xffffffffu;
}
8,251 | #include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include "inner.hpp"
#define cu_dot(x, y) thrust::inner_product((x).begin(), (x).end(), (y).begin(), 0)
using namespace std;
typedef thrust::device_vector<int> dvi;
// GPU specialization of InnerProd for int: stage both host vectors on the
// device and let Thrust compute the dot product (initial value 0).
template<>
int InnerProd<gpu, int>::operator ()(const vector<int> &x, const vector<int> &y){
    dvi device_x(x.begin(), x.end());
    dvi device_y(y.begin(), y.end());
    return thrust::inner_product(device_x.begin(), device_x.end(), device_y.begin(), 0);
}
|
8,252 | #include <cuda_runtime.h>
#include <stdio.h>
#define N 256
// Naive matrix-vector product A = B * C, where B is N x N row-major and C
// has length N. A single thread computes every row (intended for <<<1,1>>>).
__global__ void matrix_vector_multi_gpu_1_1(float *A_d, float *B_d, float *C_d)
{
    for (int row = 0; row < N; row++) {
        float acc = 0.0F;
        for (int col = 0; col < N; col++)
            acc = acc + B_d[row * N + col] * C_d[col];
        A_d[row] = acc;
    }
}
// Shared-memory matrix-vector product: thread t computes row t of A = B * C,
// with C staged in shared memory once per block. Requires a launch with
// blockDim.x == N so every element of tmp_c is populated before use.
//
// Fix: the inner loop read B_d[threadIdx.x * N + 1] — a typo for
// B_d[threadIdx.x * N + i] — so every row was computed from a single matrix
// element instead of the full row.
__global__ void matrix_vector_multi_gpu_1_1_sh(float *A_d, float *B_d, float *C_d)
{
    int i;
    __shared__ float tmp_c[N];
    tmp_c[threadIdx.x] = C_d[threadIdx.x];
    __syncthreads();
    float acc = 0.0F;
    for(i = 0; i < N; i++){
        acc = acc + B_d[threadIdx.x * N + i] * tmp_c[i];
    }
    A_d[threadIdx.x] = acc;
}
// Host driver: builds B (N x N) and C, runs both matrix-vector kernels, and
// releases device memory. Results stay on the device (nothing is copied back
// or printed), matching the original behavior.
//
// Fixes over the original:
//  - the shared-memory kernel is launched with N threads; the old <<<1,1>>>
//    launch populated only tmp_c[0] and left the rest of the shared buffer
//    uninitialized;
//  - the copy of the uninitialized host array A to the device was dropped
//    (both kernels overwrite A_d entirely);
//  - unused variables (dev, t, travdirtime, A, blocks/threads dims) removed;
//  - kernel launches are followed by an error check.
int main(int argc, char **argv)
{
    int i, j;
    float B[N*N], C[N];
    float *A_d, *B_d, *C_d;
    for(j=0;j<N;j++) {
        for(i=0;i<N;i++) {
            B[j*N+i] = ((float)j)/256.0;
        }
    }
    for(j=0;j<N;j++)
        C[j] = 1.0F;
    cudaMalloc((void **)&A_d, N*sizeof(float));
    cudaMalloc((void **)&B_d, N*N*sizeof(float));
    cudaMalloc((void **)&C_d, N*sizeof(float));
    cudaMemcpy(B_d, B, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, N*sizeof(float), cudaMemcpyHostToDevice);
    matrix_vector_multi_gpu_1_1<<<1, 1>>>(A_d, B_d, C_d);
    matrix_vector_multi_gpu_1_1_sh<<<1, N>>>(A_d, B_d, C_d);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return EXIT_SUCCESS;
}
|
8,253 | #include "includes.h"
// Compute difference[t] = input[t % inputSize] - referenceVector[t], tiling
// the single input vector across maxCells reference vectors. Thread ids are
// flattened from a 2D grid of 1D blocks.
__global__ void VectorInputDiffKernel( float *input, int inputSize, float *referenceVector, int maxCells, float *difference )
{
    const int threadId = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (threadId >= maxCells * inputSize)
        return;
    difference[threadId] = input[threadId % inputSize] - referenceVector[threadId];
}
8,254 |
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
int match(char *regexp,char *text);/*match the regexp in text*/
int matchhere(char *regexp,char *text);/*matchhere the regexp in the front of the text*/
int matchstar (int c,char *regexp,char *text);/*matchstar the regexp with "*" in the front of the text*/
/* Search for regexp anywhere in text; returns 1 on a match, 0 otherwise.
 * A leading '^' anchors the pattern to the start of the text. */
int match(char *regexp,char *text)
{
    if (regexp[0] == '^')
        return matchhere(regexp + 1, text);
    /* try the pattern at every suffix of text, including the empty one */
    do {
        if (matchhere(regexp, text))
            return 1;
    } while (*text++ != '\0');
    return 0;
}
/* Match regexp at the beginning of text (recursive descent). */
int matchhere(char *regexp,char *text)
{
    if (regexp[0] == '\0')
        return 1;                         /* empty pattern matches anything */
    if (regexp[1] == '*')
        return matchstar(regexp[0], regexp + 2, text);
    if (regexp[0] == '$' && regexp[1] == '\0')
        return *text == '\0';             /* '$' anchors to the end of text */
    if (*text != '\0' && (regexp[0] == '.' || regexp[0] == *text))
        return matchhere(regexp + 1, text + 1);
    return 0;
}
/* Match c* followed by regexp at the beginning of text ('.' matches any). */
int matchstar (int c,char *regexp,char *text)
{
    do {
        if (matchhere(regexp, text))
            return 1;
    } while (*text != '\0' && (*text++ == c || c == '.'));
    return 0;
}
/* Minimal grep: print every line of each file argument that matches the
 * regular expression in argv[1].
 *
 * Fix: fclose() was reached even when fopen() failed, passing NULL to
 * fclose() (undefined behavior); a failed file is now skipped with
 * `continue` so only successfully opened files are scanned and closed. */
int main(int argc,char *argv[])
{
    char line_buffer[1024];   /* holds one input line */
    FILE *fp;                 /* current input file */
    if (argc < 3)
    {
        fprintf(stderr,"Use: %s regular_expr files ..\n", argv[0]);
        return 1;
    }
    for (int i=2;i < argc;i++)
    {
        if ((fp=fopen(argv[i],"rb"))==NULL)
        {
            printf("%s: read failure.\n",argv[i]);
            continue;   /* nothing was opened: nothing to scan or close */
        }
        while (fgets(line_buffer,sizeof(line_buffer),fp))
        {
            if(match(argv[1],line_buffer)==1)
            {
                printf("%s\n",line_buffer);
            }
        }
        fclose(fp);
    }
    return 0;
}
|
8,255 | #include "includes.h"
// Solve x^2 = n[i] for the non-negative root via the quadratic formula with
// a = 1, b = 0 (so resultado[i] = sqrt(n[i])), one element per thread.
//
// Fix: the original divided by `2 * a` written as `... / 2 * a`, which
// parses as (.../2)*a. With a == 1 the value was unaffected, but the
// expression now matches the intended (-b + sqrt(delta)) / (2a).
__global__ void adicionarKernel(double* resultado, const double* n) {
    const int i = threadIdx.x;
    const double a = 1.0, b = 0.0;
    const double delta = b * b - (4.0 * a * (-n[i]));
    resultado[i] = (-b + sqrt(delta)) / (2.0 * a);
}
8,256 | #include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
static __device__ float E = 2.718281828;
// Flatten the multi-dimensional index `ids` (length ndim) into a linear
// offset for a row-major tensor with dimensions `dims`.
static __device__ int getIndex(int *ids, int ndim, int *dims)
{
    int id = ids[0];
    for (int i = 1; i < ndim; i++)
        id = id * dims[i] + ids[i];
    return id;
}
// Inverse of getIndex: decompose the linear offset `id` into per-dimension
// indices `ids` for a row-major tensor with dimensions `dims`.
static __device__ void getIndexes(int id, int *ids, int ndim, int *dims)
{
    for (int dim = ndim - 1; dim >= 0; dim--) {
        ids[dim] = id % dims[dim];
        id /= dims[dim];
    }
}
/* __global__ void sliceTensorKernel(float *src, float *dst, int sdim, int ddim, int start, int block_size) */
/* { */
/* int di = blockIdx.x * block_size + threadIdx.x; */
/* /\* si is the index of src elements to be copied. */
/* The "block index" of src[si] is (blockIdx.x / ddim * sdim + blockIdx.x % ddim + start) *\/ */
/* int si = (blockIdx.x / ddim * sdim + blockIdx.x % ddim + start) * block_size + threadIdx.x; */
/* dst[di] = src[si]; */
/* } */
// Copy a slice of `src` into the dense tensor `dst` (total elements).
// d_vol/s_vol are the destination/source volumes spanned by the sliced
// dimension and below; `start * vol` offsets to the slice's first element.
__global__ void sliceTensorKernel(float *src, float *dst, int start, int s_vol, int d_vol, int vol, int block_size, int total)
{
    const int di = blockIdx.x * block_size + threadIdx.x;
    if (di < total) {
        const int si = (di / d_vol) * s_vol + di % d_vol + start * vol;
        dst[di] = src[si];
    }
}
// Reduce (max + argmax) over one tensor dimension of size dim_size: each
// thread walks dim_size elements spaced reduce_vol apart, writing the
// maximum to dst[di] and its position along the reduced dimension to
// arg[di] (stored as a float). batch_vol is the stride between consecutive
// batches in src.
__global__ void reduceArgMaxKernel(float *src, float *dst, float *arg, int dim_size, int reduce_vol, int batch_vol, int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
/* src[si] is the first element in this thread to be compared, then
si = batch_vol * batch + (di - reduce_vol * batch),
where batch = di / reduce_vol,
which is the same as the following code: */
int si = (batch_vol - reduce_vol) * (di / reduce_vol) + di;
float now = src[si], max = now;
int maxi = 0;
// scan the remaining dim_size-1 candidates; first maximum wins on ties
for (int i = 1; i < dim_size; i++) {
now = src[si+i*reduce_vol];
if (now > max) {
max = now;
maxi = i;
}
}
dst[di] = max;
arg[di] = maxi;
}
// Element-wise product: dst[i] = src1[i] * src2[i] for i in [0, total).
__global__ void multiplyElementKernel(float *src1, float *src2, float *dst, int block_size, int total)
{
    const int i = blockIdx.x * block_size + threadIdx.x;
    if (i >= total)
        return;
    dst[i] = src1[i] * src2[i];
}
// Generalized transpose (axis permutation): dst is src with its dimensions
// permuted by `axes`. s_ids/d_ids are preallocated per-thread scratch
// buffers of ndim ints each.
__global__ void transposeTensorKernel(float *src, float *dst, int ndim, int *s_dims, int *d_dims, int *s_ids, int *d_ids, int *axes, int block_size, int total)
{
    const int di = blockIdx.x * block_size + threadIdx.x;
    if (di >= total)
        return;
    int *srcIdx = s_ids + di * ndim;
    int *dstIdx = d_ids + di * ndim;
    getIndexes(di, dstIdx, ndim, d_dims);          // destination coordinates
    for (int axis = 0; axis < ndim; axis++)
        srcIdx[axes[axis]] = dstIdx[axis];         // permute into source space
    dst[di] = src[getIndex(srcIdx, ndim, s_dims)];
}
// Transform SqueezeDet anchor deltas into absolute image-space boxes:
// res[4i..4i+3] receives (xmin, ymin, xmax, ymax), clamped to the image.
// NOTE(review): width/height appear to be the network input size and
// img_width/img_height the original image size, with x_shift/y_shift
// translating into image coordinates — inferred from the scaling; confirm
// against the caller.
__global__ void transformBboxSQDKernel(float *delta, float *anchor, float *res, float width, float height, float img_width, float img_height, int x_shift, int y_shift, int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
/* int batch_idx = di / anchor_num; */
/* now only support batch_size = 1 */
float x_scale = 1.0 * width / img_width;
float y_scale = 1.0 * height / img_height;
/* (not used) si is the index of the first elements to be computed in the thread, then
si = 4 * anchor_num * batch_idx + (di - anchor_num * batch_idx),
which is the same as the following code: */
/* int si = 3 * anchor_num * batch_idx + di; */
/* take 4 elements from each of delta and anchor */
int si = di * 4;
float d[4] = {delta[si], delta[si+1], delta[si+2], delta[si+3]};
float a[4] = {anchor[si], anchor[si+1], anchor[si+2], anchor[si+3]};
/* compute and put 4 result elements to res, according to SqueezeDet's source code */
/* TODO: don't know why (maybe the resize), always has some shift compared to groundtruth*/
float cx = (a[0] + d[0] * a[2]) / x_scale + x_shift;
float cy = (a[1] + d[1] * a[3]) / y_scale + y_shift;
/* size deltas below 1 go through expf; larger ones use a linear extension
scaled by E = e^1 — presumably to bound exponential growth (confirm) */
float w = (a[2] * (d[2] < 1 ? expf(d[2]) : d[2] * E)) / x_scale;
float h = (a[3] * (d[3] < 1 ? expf(d[3]) : d[3] * E)) / y_scale;
res[si] = min(max(cx - w * 0.5, 0), img_width - 1);
res[si+1] = min(max(cy - h * 0.5, 0), img_height - 1);
res[si+2] = max(min(cx + w * 0.5, img_width - 1), 0);
res[si+3] = max(min(cy + h * 0.5, img_height - 1), 0);
}
// Gather rows of length `stride`: dst row di is copied from src row idx[di].
__global__ void pickElementsKernel(float *src, float *dst, int *idx, int stride, int block_size, int total)
{
    const int di = blockIdx.x * block_size + threadIdx.x;
    if (di >= total)
        return;
    const int srcBase = idx[di] * stride;
    const int dstBase = di * stride;
    for (int k = 0; k < stride; k++)
        dst[dstBase + k] = src[srcBase + k];
}
|
// One thread per triangular face: compute the face's area from its three
// vertex coordinates (s1..s3 are the components of the cross product
// (b-a) x (c-a), expanded) and store it in partial_area[tid] for a later
// host/device reduction.
extern "C"
__global__ void surface_area(int size_faces, int* face_verts, double* coord_verts, double* partial_area)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= size_faces)
        return;
    const int ia = face_verts[3*tid + 0];
    const int ib = face_verts[3*tid + 1];
    const int ic = face_verts[3*tid + 2];
    const double ax = coord_verts[3*ia + 0];
    const double ay = coord_verts[3*ia + 1];
    const double az = coord_verts[3*ia + 2];
    const double bx = coord_verts[3*ib + 0];
    const double by = coord_verts[3*ib + 1];
    const double bz = coord_verts[3*ib + 2];
    const double cx = coord_verts[3*ic + 0];
    const double cy = coord_verts[3*ic + 1];
    const double cz = coord_verts[3*ic + 2];
    const double s1 = ((ax * (by - cy))
                     - (bx * (ay - cy))
                     + (cx * (ay - by)));
    const double s2 = ((ay * (bz - cz))
                     - (by * (az - cz))
                     + (cy * (az - bz)));
    const double s3 = ((az * (bx - cx))
                     - (bz * (ax - cx))
                     + (cz * (ax - bx)));
    partial_area[tid] = 0.5 * sqrt((s1 * s1) + (s2 * s2) + (s3 * s3));
}
// Per-row sparse-style matrix-vector product with a diagonal term:
// C[tid] = sum_i A_values[row,i] * B[A_indexs[row,i]] + w1 * B[tid],
// where each row stores exactly `max` neighbor entries. Also records the
// change Displacement[tid] = B[tid] - C[tid].
extern "C"
__global__ void multiply(int sizeB, int max, double w1, int* A_indexs, double* A_values, double* B, double* C, double* Displacement)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= sizeB)
        return;
    double sum = 0.0;
    for (int i = 0; i < max; i++) {
        const int index_neighbor = A_indexs[max*tid + i];
        sum = sum + A_values[max*tid + i]*B[index_neighbor];
    }
    sum = sum + w1*B[tid];
    C[tid] = sum;
    Displacement[tid] = B[tid] - C[tid];
}
8,258 | #include "includes.h"
//==========================================================================================================
// A small snippet of code to solve equation of types Ax=B using Gaussian Elimniation
// Author - Anmol Gupta, Naved Ansari
// Course - EC513 - Introduction to Computer Architecture
// Boston University
//==========================================================================================================
//==========================================================================================================
// Command to compile the code
//nvcc -o GaussianElimination GaussianElimination.cu
//==========================================================================================================
// Assertion to check for errors
// Forward-elimination step of Gaussian elimination on an augmented matrix
// stored row-major in a_d (rows of size+1 entries); the eliminated matrix is
// written to b_d. Intended for a single block whose (x, y) thread layout
// covers the columns/rows of the augmented matrix; the shared tile caps the
// problem at 40 x 40.
//
// Fix: the original called __syncthreads() inside a divergent branch (only
// threads with idy > column && idx >= column reached it), which is undefined
// behavior. The new values are computed into a register first, then both
// barriers sit outside the conditional so every thread participates.
__global__ void gauss_elimination_cuda(float *a_d, float *b_d ,int size) {
    int idx = threadIdx.x;
    int idy = threadIdx.y;
    __shared__ float temp[40][40];
    temp[idy][idx] = a_d[(idy * (size+1)) + idx];
    __syncthreads();
    for(int column = 0; column < size-1; column++){
        const bool active = (idy > column && idx >= column);
        float t = 0.0f;
        if(active)
            t = temp[column][idx] - (temp[column][column] / temp[idy][column]) * temp[idy][idx];
        __syncthreads();   // all reads of this round finish before any write
        if(active)
            temp[idy][idx] = t;
        __syncthreads();   // writes visible before the next round's reads
    }
    b_d[idy*(size+1) + idx] = temp[idy][idx];
}
8,259 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define MAX_NAME 256 /* tamanho maximo para nome de arquivo */
#define PI 3.14159265359
double **matrizpesos;
struct param{
int totalthreads;
int linhas;
int colunas;
int tamanho; //se a divisao nao for inteira aqui vai o mais 1
int sobra; //resto da divisao que vamos distribuir
int raio; //distancia para considerar os pixels
};
void InicializaMatrizPesos(int raio);
double **AlocaMatrizDouble(int lin, int col);
double **LiberaMatrizDouble(int lin, int col, double **mat);
int **AlocaMatriz(int lin, int col);
int **LiberaMatriz(int lin, int col, int **mat);
// Gaussian-blur kernel: each thread filters a contiguous band of image rows.
// Pixels are packed one per int as r*1000000 + g*1000 + b.
//
// NOTE(review): cudaOldimage/cudaNewimage/cudaMatrizPesos are int**/double**
// row-pointer tables built with host calloc, but main() copies them to the
// device with flat cudaMemcpy calls; the pointers dereferenced here are host
// addresses, so these accesses cannot be valid device reads. The buffers
// need to be flattened (or the pointer tables rebuilt on the device) before
// the output can be trusted — confirm against main().
__global__ void filtra(int **cudaOldimage, int **cudaNewimage, double **cudaMatrizPesos, param parameters){
    int linhai, linhaf, deslocamento, meutam, index;
    index = threadIdx.x + blockIdx.x * blockDim.x; // global thread index
    meutam = parameters.tamanho;   // base number of rows per thread
    deslocamento = 0;
    // distribute the remainder: the first `sobra` threads take one extra row
    if (parameters.sobra > 0){
        if (index < parameters.sobra){
            meutam++;
        }
        else {
            deslocamento = parameters.sobra;
        }
    }
    linhai = (meutam)* index + deslocamento;   // first row of this thread
    if (index == parameters.totalthreads - 1){
        linhaf = parameters.linhas - 1;        // last thread absorbs the tail
    }
    else {
        linhaf = (meutam)+linhai - 1;
    }
    //printf("Eu sou a thread %d fico com: linha %d a %d\n", index, linhai, linhaf);
    //tratamento_normal(linhai, linhaf);
    int dls, dli, dce, dcd;   // window clipping at top/bottom/left/right edges
    int r, g, b, nr, ng, nb;
    int l;
    int c;
    int lin_mat_pes, col_mat_pes;
    double acumular, acumulag, acumulab;   // per-channel weighted accumulators
    for (l = linhai; l <= linhaf; l++){
        // clip the weight window at the image's top and bottom edges
        if ((l - parameters.raio) < 0){
            dls = parameters.raio - l;
        }
        else dls = 0;
        if ((l + parameters.raio) >= parameters.linhas){
            dli = l + parameters.raio - (parameters.linhas - 1);
        }
        else dli = 0;
        for (c = 0; c < parameters.colunas; c++){
            acumular = 0;
            acumulag = 0;
            acumulab = 0;
            // clip the weight window at the left and right edges
            if ((c - parameters.raio) < 0){
                dce = parameters.raio - c;
            }
            else dce = 0;
            if ((c + parameters.raio) >= parameters.colunas){
                dcd = c + parameters.raio - (parameters.colunas - 1);
            }
            else dcd = 0;
            // weighted sum of the (clipped) neighborhood, channel by channel
            for (lin_mat_pes = dls; lin_mat_pes < (parameters.raio * 2 + 1 - dli); lin_mat_pes++){
                for (col_mat_pes = dce; col_mat_pes < (parameters.raio * 2 + 1 - dcd); col_mat_pes++){
                    // unpack r/g/b from the packed pixel value
                    r = cudaOldimage[l - parameters.raio + lin_mat_pes][c - parameters.raio + col_mat_pes] / 1000000;
                    g = (cudaOldimage[l - parameters.raio + lin_mat_pes][c - parameters.raio + col_mat_pes] - r * 1000000) / 1000;;
                    b = cudaOldimage[l - parameters.raio + lin_mat_pes][c - parameters.raio + col_mat_pes] - r * 1000000 - g * 1000;
                    acumular += (r * cudaMatrizPesos[lin_mat_pes][col_mat_pes]);
                    acumulag += (g * cudaMatrizPesos[lin_mat_pes][col_mat_pes]);
                    acumulab += (b * cudaMatrizPesos[lin_mat_pes][col_mat_pes]);
                }
            }
            nr = acumular;
            ng = acumulag;
            nb = acumulab;
            cudaNewimage[l][c] = nr * 1000000 + ng * 1000 + nb;   // repack
        }
    }
}
// Host driver: reads a PPM (P3) image, builds a normalized Gaussian weight
// matrix, launches the `filtra` kernel, and writes the filtered image.
//
// Fix: several `&parameters.<field>` arguments had been corrupted into the
// mojibake `¶meters.<field>` (the "&p" swallowed by an HTML-entity
// conversion), so the scanf/fscanf calls did not compile; the address-of
// expressions are restored.
//
// NOTE(review): oldimage/newimage/matrizpesos are host row-pointer tables,
// yet they are copied to the device with flat cudaMemcpy calls and passed to
// the kernel as int**/double**; the device ends up chasing host pointers and
// the copy sizes do not match the pointer-table allocations. Flattening the
// image and weight matrices into 1-D device buffers is required for the
// kernel to read valid data — left untouched here because it changes the
// kernel's interface.
int main() {
    FILE *arqin;
    FILE *arqout;
    char narqin[MAX_NAME] = "c:\\temp\\reddead.ppm";
    char narqout[MAX_NAME] = "c:\\temp\\reddead2.ppm";
    char key[128];
    int i, j, max, r, g, b; // auxiliaries
    int Blocks = 1;
    int ThreadsPerBlock = 1;
    struct param parameters;
    cudaError_t cudaStatus;
    parameters.totalthreads = Blocks * ThreadsPerBlock;
    printf("Qual raio?\n");
    scanf("%d", &parameters.raio);
    printf("Arquivo de entrada: %s\n", narqin);
    arqin = fopen(narqin, "r");
    if (arqin == NULL) {
        printf("Erro na abertura do arquivo %s\n", narqin);
        return 1;
    }
    printf("Arquivo de saida: %s\n", narqout);
    arqout = fopen(narqout, "w");
    if (arqout == NULL) {
        printf("Erro na abertura do arquivo %s\n", narqin);
        return 1;
    }
    fscanf(arqin, "%s", key);                   // read the magic number
    fprintf(arqout, "%s\n", key);               // echo the header to the output
    printf("Arquivo tipo: %s \n", key);
    fscanf(arqin, "%d %d %d", &parameters.colunas, &parameters.linhas, &max);
    fprintf(arqout, "%d %d \n%d", parameters.colunas, parameters.linhas, max);
    printf("Colunas = %d \nLinhas = %d \n", parameters.colunas, parameters.linhas);
    // split the rows among the threads
    parameters.tamanho = parameters.linhas / parameters.totalthreads;
    if ((parameters.linhas % parameters.totalthreads) > 0){
        parameters.sobra = parameters.linhas % parameters.totalthreads;
    }
    else {
        parameters.sobra = 0;
    }
    printf("Tamanho %d\n", parameters.tamanho);
    // images with fewer rows than threads are not supported
    if (parameters.linhas < parameters.totalthreads){
        printf("Mais threads do que dados %s\n", narqin);
        return 0;
    }
    int **oldimage = AlocaMatriz(parameters.linhas, parameters.colunas);
    int **newimage = AlocaMatriz(parameters.linhas, parameters.colunas);
    matrizpesos = AlocaMatrizDouble(parameters.raio * 2 + 1, parameters.raio * 2 + 1);
    InicializaMatrizPesos(parameters.raio);
    // read the pixel data, packing each pixel as r*1000000 + g*1000 + b
    for (i = 0; i <= parameters.linhas - 1; i++)
        for (j = 0; j <= parameters.colunas - 1; j++) {
            fscanf(arqin, " %d %d %d ", &r, &g, &b);
            oldimage[i][j] = r * 1000000 + g * 1000 + b;
        }
    // allocate device memory (see NOTE above about the pointer tables)
    int size_m_int = parameters.linhas*parameters.colunas*sizeof(int);
    int size_m_double = (parameters.raio * 2 + 1) * (parameters.raio * 2 + 1) * sizeof(double);
    int **doldimage, **dnewimage;
    double **dmatrizpesos;
    printf("1...alocando doldimage na GPU...");
    cudaStatus = cudaMalloc((void**)&doldimage, size_m_int);
    if (cudaStatus != cudaSuccess) {
        printf("cudaMalloc failed!");
        return 1;
    }
    printf("2...alocando dnewimage na GPU...");
    cudaStatus = cudaMalloc((void**)&dnewimage, size_m_int);
    if (cudaStatus != cudaSuccess) {
        printf("cudaMalloc failed!");
        return 1;
    }
    printf("3...alocando dmatrizpesos na GPU...");
    cudaStatus = cudaMalloc((void**)&dmatrizpesos, size_m_double);
    if (cudaStatus != cudaSuccess) {
        printf("cudaMalloc failed!");
        return 1;
    }
    // copy the matrices from host to device
    printf("4...copiando oldimage para GPU...");
    cudaStatus = cudaMemcpy(doldimage, oldimage, size_m_int, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        printf("cudaMemcpy failed!");
        return 1;
    }
    printf("5...copiando matrizpesos para GPU...");
    cudaStatus = cudaMemcpy(dmatrizpesos, matrizpesos, size_m_double, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        printf("cudaMemcpy failed!");
        return 1;
    }
    // launch the kernel
    printf("6...rodando kernel...");
    filtra<<<Blocks, ThreadsPerBlock>>>(doldimage, dnewimage, dmatrizpesos, parameters);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        printf("addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        return 1;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    printf("7...sincronizando com device...");
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        return 1;
    }
    // copy the resulting matrix from the GPU back to the CPU
    printf("8...copiando dnewimage da GPU para processador...");
    cudaStatus = cudaMemcpy(newimage, dnewimage, size_m_int, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        printf("cudaMemcpy failed!");
        return 1;
    }
    // write the new file, unpacking each pixel back into r/g/b
    printf("9...escrevendo nova imagem...");
    for (i = 0; i <= parameters.linhas - 1; i++){
        fprintf(arqout, "\n");
        for (j = 0; j <= parameters.colunas - 1; j++) {
            r = newimage[i][j] / 1000000;
            g = (newimage[i][j] - r * 1000000) / 1000;
            b = newimage[i][j] - r * 1000000 - g * 1000;
            fprintf(arqout, "%d %d %d ", r, g, b);
        }
    }
    printf("10...liberando matrizes...");
    LiberaMatriz(parameters.linhas, parameters.colunas, oldimage);
    LiberaMatriz(parameters.linhas, parameters.colunas, newimage);
    LiberaMatrizDouble(parameters.raio * 2 + 1, parameters.raio * 2 + 1, matrizpesos);
    cudaFree(doldimage);
    cudaFree(dnewimage);
    cudaFree(dmatrizpesos);
    fclose(arqin);
    fclose(arqout);
    printf("Fim programa.\n");
    return 0;
}
// Fill the global `matrizpesos` with a (2*raio+1)^2 Gaussian kernel using
// sigma equal to the radius, then normalize it so the weights sum to 1.
void InicializaMatrizPesos(int raio){
    float sigma = raio;     // standard deviation equals the radius
    double somapesos = 0;
    // first pass: unnormalized Gaussian weights, accumulating their sum
    for (int i = 0; i < sigma * 2 + 1; i++){
        for (int j = 0; j < raio * 2 + 1; j++){
            double e = pow((float)exp(1.0), ((-1)*(pow((i - sigma), 2) + pow((j - sigma), 2)) / (2 * pow(sigma, 2))));
            double g = e / (2 * PI*pow(sigma, 2));
            matrizpesos[i][j] = g;
            somapesos += g;
        }
    }
    // second pass: divide every weight by the total so the kernel sums to 1
    for (int i = 0; i < sigma * 2 + 1; i++){
        for (int j = 0; j < raio * 2 + 1; j++){
            matrizpesos[i][j] = matrizpesos[i][j] / somapesos;
        }
    }
}
// Allocate a lin x col matrix of ints as an array of row pointers, every
// element zero-initialized. Returns NULL on invalid parameters or on
// allocation failure.
//
// Fix: on a partial row-allocation failure the original leaked both the
// rows already allocated and the pointer array; they are now released
// before returning NULL.
int **AlocaMatriz(int lin, int col){
    int **mat;   /* pointer to the matrix */
    int i;
    if (lin < 1 || col < 1) {   /* validate parameters */
        printf("** Erro: Parametro invalido **\n");
        return(NULL);
    }
    /* allocate the row-pointer array */
    mat = (int **)calloc(lin, sizeof(int *));
    if (mat == NULL) {
        printf("** Erro: Memoria Insuficiente **");
        return(NULL);
    }
    /* allocate each row */
    for (i = 0; i < lin; i++){
        mat[i] = (int*)calloc(col, sizeof(int));
        if (mat[i] == NULL) {
            printf("** Erro: Memoria Insuficiente **");
            while (--i >= 0)
                free(mat[i]);   /* release rows allocated so far */
            free(mat);
            return(NULL);
        }
    }
    return(mat);
}
/* Frees an int matrix created by AlocaMatriz and returns NULL so the
 * caller can reset its pointer. A NULL matrix is accepted; invalid
 * dimensions leave the matrix untouched and return it unchanged. */
int **LiberaMatriz(int lin, int col, int **mat){
    int r;
    if (mat == NULL) return(NULL);
    if (lin < 1 || col < 1){   /* validate parameters */
        printf("** Erro: Parametro invalido **\n");
        return(mat);
    }
    /* release every row, then the row-pointer array itself */
    for (r = 0; r < lin; r++) {
        free(mat[r]);
    }
    free(mat);
    return(NULL);
}
/* Allocates a lin x col matrix of double as an array of row pointers,
 * zero-initialized (calloc).
 * Returns NULL on invalid parameters or allocation failure; on partial
 * failure all rows allocated so far are released first (fix: the original
 * leaked them). Free the result with LiberaMatrizDouble. */
double **AlocaMatrizDouble(int lin, int col){
    double **mat;   /* pointer to the matrix (array of row pointers) */
    int i;          /* helper index */
    if (lin < 1 || col < 1) {   /* validate parameters */
        printf("** Erro: Parametro invalido **\n");
        return(NULL);
    }
    /* allocate the row-pointer array */
    mat = (double **)calloc(lin, sizeof(double *));
    if (mat == NULL) {
        printf("** Erro: Memoria Insuficiente **");
        return(NULL);
    }
    /* allocate each row */
    for (i = 0; i < lin; i++){
        mat[i] = (double*)calloc(col, sizeof(double));
        if (mat[i] == NULL) {
            printf("** Erro: Memoria Insuficiente **");
            /* fix: free already-allocated rows and the pointer array so a
             * partial failure does not leak memory */
            while (--i >= 0) free(mat[i]);
            free(mat);
            return(NULL);
        }
    }
    return(mat);   /* caller owns the matrix */
}
/* Frees a double matrix created by AlocaMatrizDouble and returns NULL so
 * the caller can reset its pointer. A NULL matrix is accepted; invalid
 * dimensions leave the matrix untouched and return it unchanged. */
double **LiberaMatrizDouble(int lin, int col, double **mat){
    int r;
    if (mat == NULL) return(NULL);
    if (lin < 1 || col < 1){   /* validate parameters */
        printf("** Erro: Parametro invalido **\n");
        return(mat);
    }
    /* release every row, then the row-pointer array itself */
    for (r = 0; r < lin; r++) {
        free(mat[r]);
    }
    free(mat);
    return(NULL);
}
8,260 | #include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <math.h>
#include <complex.h>
#include <cuComplex.h>
using namespace std;
// CPU reference: element-wise product of an m x n complex matrix `a` with
// a real matrix `b` (both row-major, flattened). Returns a newly allocated
// result buffer that the caller owns.
cuFloatComplex *pcmul(cuFloatComplex *a, float *b, int m, int n) {
    cuFloatComplex *out = new cuFloatComplex[m*n];
    const int total = m * n;
    for (int idx = 0; idx < total; idx++) {
        float re = cuCrealf(a[idx]);
        float im = cuCimagf(a[idx]);
        out[idx] = make_cuFloatComplex(b[idx]*re, b[idx]*im);
    }
    return out;
}
// GPU version: one thread per element of the flattened m x n matrix;
// res[i] = b[i] * a[i] (real scaling of a complex value).
__global__ void pcmul_kernel(cuFloatComplex *res, cuFloatComplex *a, float *b, int m, int n) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= m*n) return;   // tail guard for partial final block
    float re = cuCrealf(a[i]);
    float im = cuCimagf(a[i]);
    res[i] = make_cuFloatComplex(b[i]*re, b[i]*im);
}
// GPU driver for pcmul_kernel: copies a/b to the device, multiplies
// element-wise, copies the result back. Prints microsecond timings for
// each phase. Caller owns the returned host buffer.
cuFloatComplex *pcmul_gpu(cuFloatComplex *a, float *b, int m, int n) {
    // host
    cuFloatComplex *h_res;
    // device
    cuFloatComplex *d_a, *d_res;
    float *d_b;
    struct timeval tb, te;
    unsigned long long bb, e;
    h_res = new cuFloatComplex[m*n];
    cudaMalloc(&d_res, m*n*sizeof(cuFloatComplex));
    cudaMalloc(&d_a, m*n*sizeof(cuFloatComplex));
    cudaMalloc(&d_b, m*n*sizeof(float));
    gettimeofday(&tb, NULL);
    cudaMemcpy(d_a, a, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, m*n*sizeof(float), cudaMemcpyHostToDevice);
    gettimeofday(&te, NULL);
    bb = (unsigned long long)(tb.tv_sec) * 1000000 + (unsigned long long)(tb.tv_usec);
    e = (unsigned long long)(te.tv_sec) * 1000000 + (unsigned long long)(te.tv_usec);
    cout << "copy to device " << e-bb << endl;
    gettimeofday(&tb, NULL);
    // fix: robust 1D launch config — the original <<<m,n>>> breaks whenever
    // n exceeds the 1024-threads-per-block limit.
    int threads = 256;
    int blocks = (m*n + threads - 1) / threads;
    pcmul_kernel<<<blocks, threads>>>(d_res, d_a, d_b, m, n);
    // fix: kernel launches are asynchronous — without this sync the
    // "kernel compute" timing only measured launch overhead.
    cudaDeviceSynchronize();
    gettimeofday(&te, NULL);
    bb = (unsigned long long)(tb.tv_sec) * 1000000 + (unsigned long long)(tb.tv_usec);
    e = (unsigned long long)(te.tv_sec) * 1000000 + (unsigned long long)(te.tv_usec);
    cout << "kernel compute " << e-bb << endl;
    gettimeofday(&tb, NULL);
    cudaMemcpy(h_res, d_res, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
    gettimeofday(&te, NULL);
    bb = (unsigned long long)(tb.tv_sec) * 1000000 + (unsigned long long)(tb.tv_usec);
    e = (unsigned long long)(te.tv_sec) * 1000000 + (unsigned long long)(te.tv_usec);
    cout << "copy to host " << e-bb << endl;
    cudaFree(d_res);
    cudaFree(d_a);
    cudaFree(d_b);
    return h_res;
}
// Benchmarks the CPU (pcmul) and GPU (pcmul_gpu) element-wise
// complex-by-real multiply on a 1024x512 matrix, printing wall time
// in microseconds for each.
int main(int argc, char **argv) {
    int m, n;
    cuFloatComplex *iq, *mul;
    float *h;
    m = 1024;
    n = 512;
    iq = new cuFloatComplex[m*n];
    h = new float[m*n];
    // Fill inputs: iq[i][j] = i + j*i (complex), h[i][j] = i.
    for (int i=0; i<m; i++) {
        for (int j=0; j<n; j++) {
            iq[i*n+j] = make_cuFloatComplex((float)i, (float)j);
            h[i*n+j] = i;
        }
    }
    struct timeval tb, te;
    unsigned long long b, e;
    gettimeofday(&tb, NULL);
    mul = pcmul(iq, h, m, n);
    gettimeofday(&te, NULL);
    b = (unsigned long long)(tb.tv_sec) * 1000000 + (unsigned long long)(tb.tv_usec);
    e = (unsigned long long)(te.tv_sec) * 1000000 + (unsigned long long)(te.tv_usec);
    cout << e-b << endl;
    // fix: the CPU result buffer was leaked when `mul` was reassigned below.
    delete[] mul;
    gettimeofday(&tb, NULL);
    mul = pcmul_gpu(iq, h, m, n);
    gettimeofday(&te, NULL);
    b = (unsigned long long)(tb.tv_sec) * 1000000 + (unsigned long long)(tb.tv_usec);
    e = (unsigned long long)(te.tv_sec) * 1000000 + (unsigned long long)(te.tv_usec);
    cout << e-b << endl;
    delete[] iq;
    delete[] h;
    delete[] mul;
    return 0;
}
|
8,261 | #include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
namespace graphdl
{
namespace core
{
namespace cuda
{
// Initializes one curand state per thread: same seed, per-thread
// subsequence id, so streams are statistically independent.
// NOTE(review): there is no bounds check — callers must allocate at least
// gridDim.x * blockDim.x states, or the trailing threads of the last block
// write past the end of `state`.
__global__ void setupKernel(curandState* state, size_t seed)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, id, 0, &state[id]);
}
// Draws one uniform sample per thread and maps it into [min, max).
__global__ void uniformRandomKernel(curandState* state, float* memory,
                                    size_t size, float min, float max)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= size) return;   // tail guard
    float u = curand_uniform(state + id);
    memory[id] = u * (max - min) + min;
}
// Draws one normally distributed sample per thread: N(mean, stddev).
__global__ void normalRandomKernel(curandState* state, float* memory,
                                   size_t size, float mean, float stddev)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= size) return;   // tail guard
    memory[id] = stddev * curand_normal(state + id) + mean;
}
// Fills device buffer memory[0..size) with uniform random floats in
// [min, max) using one curand state per launched thread.
void uniformRandom(float* memory, size_t size, float min, float max,
                   size_t seed)
{
    const int BLOCK_SIZE = 256;
    const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    curandState* states;
    // fix: allocate one state per *launched* thread (grid rounded up to a
    // whole block) — the original allocated only `size` states, so
    // setupKernel's unguarded tail threads wrote out of bounds.
    cudaMalloc(&states, (size_t)NUM_BLOCKS * BLOCK_SIZE * sizeof(curandState));
    setupKernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(states, seed);
    uniformRandomKernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(states, memory, size, min,
                                                    max);
    cudaDeviceSynchronize();
    // fix: the states buffer was leaked on every call.
    cudaFree(states);
}
// Fills device buffer memory[0..size) with normally distributed floats
// N(mean, stddev) using one curand state per launched thread.
void normalRandom(float* memory, size_t size, float mean, float stddev,
                  size_t seed)
{
    const int BLOCK_SIZE = 256;
    const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    curandState* states;
    // fix: allocate one state per *launched* thread — the original's
    // `size`-element buffer was overrun by setupKernel's tail threads.
    cudaMalloc(&states, (size_t)NUM_BLOCKS * BLOCK_SIZE * sizeof(curandState));
    setupKernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(states, seed);
    normalRandomKernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(states, memory, size, mean,
                                                   stddev);
    cudaDeviceSynchronize();
    // fix: the states buffer was leaked on every call.
    cudaFree(states);
}
} // namespace cuda
} // namespace core
} // namespace graphdl
|
8,262 | #include "includes.h"
// SGD parameter update for a fully connected layer.
// One thread per current-layer neuron j (2D grid of 1D blocks flattened);
// each thread walks its incoming-weight column (stride = thisLayerSize)
// and applies per-weight learning rates. Neurons whose dropout mask entry
// is non-zero are skipped entirely.
__global__ void FullyConnectedUpdateParametersKernel( float *weightPtr, float *biasPtr, float *weightLearningRatePtr, float *biasLearningRatePtr, float *weightGradientPtr, float *biasGradientPtr, float *dropoutMaskPtr, int prevLayerSize, int thisLayerSize )
{
    // flatten (blockIdx.y, blockIdx.x, threadIdx.x) into a neuron id
    int neuron = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (neuron >= thisLayerSize) return;
    if (dropoutMaskPtr[neuron]) return;   // dropped-out neuron: no update

    // weights: walk the column of incoming weights for this neuron
    int w = neuron;
    for (int p = 0; p < prevLayerSize; p++)
    {
        weightPtr[w] -= weightLearningRatePtr[w] * weightGradientPtr[w];
        w += thisLayerSize;
    }
    // bias update
    biasPtr[neuron] -= biasLearningRatePtr[neuron] * biasGradientPtr[neuron];
}
8,263 | /* ==================================================================
Programmer: Arunbalaji Prithiviraj (U#80066848) arunbalaji@mail.usf.edu
The basic SDH algorithm implementation for 3D data
To compile: nvcc proj2-arunbalaji.cu -o output in the rc machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
float *h_x;
float *h_y;
float *h_z;
typedef struct hist_entry{
unsigned int d_cnt; /* need a unsigned int data type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
unsigned int PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
float PDH_res; /* value of w */
/*
 * CUDA kernel: builds the spatial distance histogram (SDH) for 3D points.
 * Each thread owns point `id` and pairs it with the next `half` points
 * cyclically, so every unordered pair is visited once; the odd/half test
 * drops the duplicate middle pairing when the point count is even.
 * Per-block partial histograms live in dynamic shared memory and are
 * merged into the global histogram by thread 0 of each block.
 *
 * NOTE(review): the launch supplies sizeof(bucket)*blockDim.x shared bytes
 * and only blockDim.x entries are zeroed, yet h_pos can reach
 * num_buckets-1 and the merge loop reads num_buckets entries. If
 * num_buckets > blockDim.x this indexes past the zeroed region — confirm
 * callers guarantee num_buckets <= blockDim.x.
 */
__global__ void SDH (float* x, float*y, float*z ,bucket* hist, const int PDH_acnt, float PDH_res,int num_buckets)
{
    int tx = threadIdx.x;
    int id = blockIdx.x*blockDim.x+tx;   /* global point index */
    int j,k;
    int h_pos;                           /* destination bucket index */
    int half = PDH_acnt/2;
    int odd = PDH_acnt%2;
    extern __shared__ bucket shared_h[]; /* per-block partial histogram */
    shared_h[tx].d_cnt = 0;
    __syncthreads();
    if(id >= PDH_acnt) return;
    for(k = 1; k <= half; k++)
    {
        j=(id+k)%(PDH_acnt);   /* cyclic partner point */
        /* bucket = euclidean distance / w (note: sqrt is truncated to int
           before the divide by PDH_res, then the float result is truncated
           again on assignment) */
        h_pos=(int) sqrt((x[id] - x[j])*(x[id]-x[j]) + (y[id] - y[j])*(y[id] - y[j]) + (z[id] - z[j])*(z[id] - z[j]))/PDH_res;
        /* even N: the k==half pairing is seen from both ends of the cycle;
           keep only the copy where id < half */
        if(!odd&&k==half&&id>=half) continue;
        atomicAdd(&shared_h[h_pos].d_cnt,1);
    }
    __syncthreads();
    /* thread 0 folds the block-local histogram into the global one */
    for(k = 0; k<num_buckets&&tx== 0;k++)
        atomicAdd(&hist[k].d_cnt, shared_h[k].d_cnt);
}
/*
 * Prints every bucket count of the histogram, five buckets per row,
 * and the grand total after the final bucket (the total should equal
 * the number of point pairs, N*(N-1)/2).
 */
void output_histogram(bucket* output){
    int i;
    unsigned int running_total = 0;
    for (i = 0; i < num_buckets; i++) {
        if (i % 5 == 0)   /* start a new row of five buckets */
            printf("\n%02d: ", i);
        printf("%15u ", output[i].d_cnt);
        running_total += output[i].d_cnt;
        if (i == num_buckets - 1)
            printf("\n T:%u \n", running_total);   /* sanity-check total */
        else
            printf("| ");
    }
}
// Usage: ./sdh <num_points> <bucket_width_w> <threads_per_block>
// Generates uniform random 3D points, computes the spatial distance
// histogram on the GPU, prints it plus the measured kernel time.
int main(int argc, char **argv)
{
    if(argc != 4)
    {
        printf("Missing inputs, Try again!\n");
        exit(0);
    }
    int i;
    bucket* h_histogram;
    /* device buffers */
    float *d_x, *d_y, *d_z;
    bucket* d_histogram;
    cudaEvent_t start, stop;
    /* command line arguments */
    PDH_acnt = atoi(argv[1]);
    PDH_res = atof(argv[2]);
    int threads = atoi(argv[3]);
    /* fix: proper ceiling division. The original `ceil(PDH_acnt/threads)+1`
       truncated first (integer division) and then always added one extra
       block; the kernel's id guard hid the waste. */
    dim3 dimGrid((PDH_acnt + threads - 1) / threads, 1, 1);
    dim3 dimBlock(threads, 1, 1);
    num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
    /* NOTE(review): the kernel's shared histogram is sized by `threads`
       but covers num_buckets entries during the merge — callers should
       pass threads >= num_buckets; confirm against the kernel. */
    h_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
    h_x = (float*)malloc(sizeof(float)*PDH_acnt);
    h_y = (float*)malloc(sizeof(float)*PDH_acnt);
    h_z = (float*)malloc(sizeof(float)*PDH_acnt);
    cudaMalloc(&d_x, sizeof(float)*PDH_acnt);
    cudaMalloc(&d_y, sizeof(float)*PDH_acnt);
    cudaMalloc(&d_z, sizeof(float)*PDH_acnt);
    cudaMalloc(&d_histogram, sizeof(bucket)*num_buckets);
    cudaMemset(d_histogram, 0, sizeof(bucket)*num_buckets);
    srand(1);
    /* generate data following a uniform distribution */
    for(i = 0; i < PDH_acnt; i++) {
        h_x[i] = ((float)(rand()) / RAND_MAX) * BOX_SIZE;
        h_y[i] = ((float)(rand()) / RAND_MAX) * BOX_SIZE;
        h_z[i] = ((float)(rand()) / RAND_MAX) * BOX_SIZE;
    }
    /* time copies + kernel with CUDA events */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMemcpy(d_x, h_x, sizeof(float)*PDH_acnt, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, sizeof(float)*PDH_acnt, cudaMemcpyHostToDevice);
    cudaMemcpy(d_z, h_z, sizeof(float)*PDH_acnt, cudaMemcpyHostToDevice);
    /* launch with one shared-memory bucket per thread */
    SDH<<<dimGrid, dimBlock, sizeof(bucket)*threads>>>(d_x, d_y, d_z, d_histogram, PDH_acnt, PDH_res, num_buckets);
    cudaMemcpy(h_histogram, d_histogram, sizeof(bucket)*num_buckets, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    output_histogram(h_histogram);
    printf("\n******** Total Running Time of Kernel = %0.5f ms *******\n", elapsedTime);
    /* release device memory */
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    cudaFree(d_histogram);
    /* release host memory */
    free(h_x);
    free(h_y);
    free(h_z);
    free(h_histogram);
    return 0;
}
|
8,264 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/*********************************************************************************************
//To Compile:
// nvcc -o PasswordCrackingByCuda PasswordCrackingByCuda.cu
// To Run: ./PasswordCrackingByCuda
*****************************************************************************************/
// Returns 1 when `check` exactly equals any of the four hard-coded
// passwords, 0 otherwise. Each comparison walks the candidate against one
// password by index, succeeding only when both strings end together.
__device__ int is_a_match(char *check) {
    char password1[]="AN9810";
    char password2[]="JI2205";
    char password3[]="TM5298";
    char password4[]="UN6085";
    int i;
    for (i = 0; check[i] == password1[i]; i++)
        if (check[i] == '\0') return 1;
    for (i = 0; check[i] == password2[i]; i++)
        if (check[i] == '\0') return 1;
    for (i = 0; check[i] == password3[i]; i++)
        if (check[i] == '\0') return 1;
    for (i = 0; check[i] == password4[i]; i++)
        if (check[i] == '\0') return 1;
    return 0;   // no password matched
}
// Brute-force kernel: each (block, thread) pair fixes the two leading
// letters of a 6-character candidate (blockIdx.x -> first letter,
// threadIdx.x -> second, both offset from 'A'), then enumerates every
// 4-digit suffix and reports matches.
__global__ void kernel() {
    char check[7];
    check[6] = '\0';
    check[0] = (char)(blockIdx.x + 65);    // 'A' + block index
    check[1] = (char)(threadIdx.x + 65);   // 'A' + thread index
    for (char d0 = '0'; d0 <= '9'; d0++) {
        for (char d1 = '0'; d1 <= '9'; d1++) {
            for (char d2 = '0'; d2 <= '9'; d2++) {
                for (char d3 = '0'; d3 <= '9'; d3++) {
                    check[2] = d0;
                    check[3] = d1;
                    check[4] = d2;
                    check[5] = d3;
                    if (is_a_match(check)) {
                        printf("Password successfully cracked: %s\n", check);
                    }
                }
            }
        }
    }
}
// Computes finish - start in nanoseconds into *difference.
// Returns 0 when the interval is positive, 1 otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int sec = finish->tv_sec - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    if (nsec < 0)
    {
        // borrow one second from the seconds field
        sec -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return !(*difference > 0);
}
// Entry point: launches the 26x26 brute-force kernel (one block per first
// letter, one thread per second letter) and reports wall-clock time.
int main(int argc, char *argv[])
{
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26, 26>>>();
    // fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,(time_elapsed/1.0e9));
    return 0;
}
|
8,265 | #include <thrust/version.h>
#include <iostream>
#include <thrust/device_vector.h>
int main(void)
{
    // Report the Thrust version bundled with this toolkit.
    int major = THRUST_MAJOR_VERSION;
    int minor = THRUST_MINOR_VERSION;
    std::cout << "Thrust v" << major << "." << minor << std::endl;

    // Sum of a device vector holding three 1s among five elements.
    thrust::device_vector<int> vec(5, 0);
    vec[1] = 1;
    vec[3] = 1;
    vec[4] = 1;
    int sum = thrust::reduce(vec.begin(), vec.end(), (int) 0, thrust::plus<int>());
    std::cout << "Sum:" << sum << std::endl;

    // Product of five floats, reduced with multiplication (identity 1).
    thrust::device_vector<float> d1(5, 0);
    d1[0] = 1.2;
    d1[1] = 2.3;
    d1[2] = 3.4;
    d1[3] = 4.5;
    d1[4] = 5.6;
    float mul = thrust::reduce(d1.begin(), d1.end(), (float) 1, thrust::multiplies<float>());
    std::cout << "Mul:" << mul << std::endl;
    return 0;
}
8,266 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
// Returns 1 when `passcrack` exactly equals any of the four hard-coded
// passwords, 0 otherwise. Each comparison walks by index and succeeds
// only when both strings terminate together.
__device__ int cracks(char * passcrack){
    char pswd1[]="RD1234";
    char pswd2[]="MN4567";
    char pswd3[]="SS9867";
    char pswd4[]="CD5493";
    int i;
    for (i = 0; passcrack[i] == pswd1[i]; i++)
        if (passcrack[i] == '\0') return 1;
    for (i = 0; passcrack[i] == pswd2[i]; i++)
        if (passcrack[i] == '\0') return 1;
    for (i = 0; passcrack[i] == pswd3[i]; i++)
        if (passcrack[i] == '\0') return 1;
    for (i = 0; passcrack[i] == pswd4[i]; i++)
        if (passcrack[i] == '\0') return 1;
    return 0;   // no password matched
}
// Brute-force kernel: blockIdx.x and threadIdx.x select the two leading
// letters ('A'..'Z'); the nested loops enumerate every 4-digit suffix and
// report any candidate that matches a stored password.
__global__ void cracks() {
    char passcrack[7];
    passcrack[0] = (char)('A' + blockIdx.x);
    passcrack[1] = (char)('A' + threadIdx.x);
    passcrack[6] = '\0';
    for (int a = 0; a < 10; a++) {
        for (int b = 0; b < 10; b++) {
            for (int c = 0; c < 10; c++) {
                for (int d = 0; d < 10; d++) {
                    passcrack[2] = (char)('0' + a);
                    passcrack[3] = (char)('0' + b);
                    passcrack[4] = (char)('0' + c);
                    passcrack[5] = (char)('0' + d);
                    if (cracks(passcrack)) {
                        printf("Password successfully cracked: %s\n", passcrack);
                    }
                }
            }
        }
    }
}
// Computes finish - start in nanoseconds into *difference.
// Returns 0 when the interval is positive, 1 otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int sec = finish->tv_sec - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    if (nsec < 0)
    {
        // borrow one second from the seconds field
        sec -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return !(*difference > 0);
}
// Entry point: launches the 26x26 brute-force kernel and reports
// wall-clock time in nanoseconds and seconds.
int main(int argc, char *argv[])
{
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    cracks <<<26, 26>>>();
    // fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed/1.0e9));
    return 0;
}
|
8,267 | #include <math.h>
#define N 64
/* Maps index i in [0, n-1] linearly onto [0.0, 1.0]. */
float scale(int i, int n)
{
    float numerator = (float)i;
    return numerator / (n - 1);
}
/* Absolute distance between two scalar coordinates. */
float distance(float x1, float x2)
{
    float delta = x2 - x1;
    return fabsf(delta);
}
int main()
{
    // For each of N evenly spaced points in [0, 1], compute its distance
    // to the reference point 0.5.
    float out[N] = { 0.0f };
    const float ref = .5f;
    for (int idx = 0; idx < N; ++idx)
    {
        out[idx] = distance(scale(idx, N), ref);
    }
    return 0;
}
8,268 | #include <stdio.h>
int main(void){
    cudaDeviceProp prop;
    cudaDeviceProp wanted;
    int count, device;

    // Enumerate every CUDA device with its compute capability.
    cudaGetDeviceCount (&count);
    for (int i = 0; i < count; i++){
        cudaGetDeviceProperties(&prop, i);
        printf("Device %d, %s: CC: %d.%d\n", i, prop.name, prop.major, prop.minor);
    }

    // Ask the runtime for the device closest to compute capability 2.1,
    // select it, and print what was chosen.
    memset(&wanted, 0, sizeof(cudaDeviceProp));
    wanted.major = 2;
    wanted.minor = 1;
    device = 1;
    cudaChooseDevice(&device, &wanted);
    cudaSetDevice(device);
    cudaGetDeviceProperties(&prop, device);
    printf("\n Device chosen %d, %s: CC: %d.%d\n", device, prop.name, prop.major, prop.minor);
    return 0;
}
|
8,269 | #include "includes.h"
// Grid-stride loop: in[i] += add[0] * scale for every i < size.
// The scalar lives in device memory (add[0]) so it can be produced by a
// previous kernel without a round trip to the host.
__global__ void addScalarInArrayInPlace(float* in, float* add, float scale, int size) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    // fix: the loop condition already bounds tid, so the original's
    // redundant inner `if (tid < size)` guard is removed.
    for (; tid < size; tid += stride)
        in[tid] += add[0] * scale;
}
8,270 | #include <stdio.h>
// Reports total/free/used device memory for the GPU id given on the
// command line.
int main(int argc, char* argv[]) {
    // fix: the original declared `size_t *free, *total` — two
    // uninitialized pointers — and passed `(size_t*)&free` so the values
    // landed in the pointer variables themselves (also shadowing free()).
    // Plain size_t values are what cudaMemGetInfo expects.
    size_t free_mem, total_mem;
    int gpu_id;
    if (argc == 1) {
        fputs("Error: GPU id number is required.\n", stderr);
        exit(1);
    }
    gpu_id = atoi(argv[1]);
    cudaSetDevice(gpu_id);
    cudaMemGetInfo(&free_mem, &total_mem);
    printf ("\ttotal\t\tfree\t\tused\n");
    printf ("GPU %d\t%lu\t%lu\t%lu\n", gpu_id, total_mem, free_mem, total_mem - free_mem);
    return 0;
}
|
8,271 | #include <cstdio>
#include <climits>
#define SERIAL_SCALE 1
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
/**
 * One radix-selection pass: for every element, computes the per-block
 * inclusive prefix sum of the predicate "bit selected by bitMask is 0".
 *
 * @param input     Whole array being sorted
 * @param output    Result: inclusive prefix sums within each chunk
 * @param sumGlobal Receives each chunk's final prefix value, shifted by
 *                  one slot (slot 0 left for a later exclusive scan)
 * @param bitMask   Mask selecting the radix bit (1 << b, b in 0..30)
 */
__global__
void kernelMain(int *input, int *output, int *sumGlobal, int bitMask){
    //int thid = (blockIdx.x * blockDim.x) + threadIdx.x; // global thread id (rarely needed)
    __shared__ int mem[SERIAL_PART][1024+3];
    int baseOffset=(blockIdx.x * blockDim.x)*SERIAL_PART; // offset of this block's run of chunks
    // Load the predicate (selected bit == 0) for each of the SERIAL_PART chunks.
    for(int i=0;i<SERIAL_PART;++i)
    {
        mem[i][threadIdx.x]= ((input[baseOffset+i*blockDim.x+threadIdx.x]&bitMask)==0);
    }
    __syncthreads();
    // Hillis-Steele inclusive scan over each chunk row. The barrier sits
    // between the read and the write of each doubling step, so reads and
    // writes of the same row never race within a step; across steps each
    // row's writes are fenced by the *other* row's barrier before being
    // re-read. NOTE(review): that cross-step fencing relies on
    // SERIAL_PART >= 2 — confirm before lowering SERIAL_SCALE to 0.
    for(int shift=1;shift<1024;shift*=2)
    {
        //scan
        for(int i=0;i<SERIAL_PART;++i)
        {
            int v=mem[i][threadIdx.x];
            if(threadIdx.x>=shift)
            {
                v+=mem[i][threadIdx.x-shift];
            }
            __syncthreads();
            mem[i][threadIdx.x]=v;
        }
    }
    //__syncthreads();
    // Write-back: each thread reads only the element it wrote itself, so
    // no further barrier is needed. The last thread of the block also
    // exports the chunk total to sumGlobal (shifted by one).
    for(int i=0;i<SERIAL_PART;++i)
    {
        output[baseOffset+i*blockDim.x+threadIdx.x]=mem[i][threadIdx.x];
        if(threadIdx.x == blockDim.x-1)
        {
            sumGlobal[blockIdx.x * SERIAL_PART + i + 1] = mem[i][threadIdx.x];
        }
    }
}
/**
 * Scatter pass of the radix-sort step: stably moves every element with a
 * 0 radix bit into the low partition of `output` (at its global 0-rank)
 * and every element with a 1 bit into the high partition starting at
 * sumLast.
 *
 * @param input     Input array of numbers
 * @param output    Output array of numbers
 * @param sumLocal  Per-chunk inclusive prefix sums (from kernelMain)
 * @param sumGlobal External exclusive prefix sums of the chunk totals
 * @param bitMask   Mask selecting the radix bit (1 << b, b in 0..30)
 * @param sumLast   Output index of the first element whose bit is 1
 */
__global__
void kernelShuffle(int *input,int *output,int *sumLocal,int *sumGlobal,int bitMask,int sumLast)
{
    int baseOffset=(blockIdx.x * blockDim.x)*SERIAL_PART; // offset of this block's run of chunks
    __shared__ int sumGlobalS[SERIAL_PART][1024];
    // Combine each chunk's global base with the local prefix sum: the
    // global 1-based rank of every 0-bit element.
    for(int i=0;i<SERIAL_PART;++i)
    {
        int in=baseOffset+i*blockDim.x+threadIdx.x;
        sumGlobalS[i][threadIdx.x]=sumGlobal[blockIdx.x*SERIAL_PART+i]+sumLocal[in];
    }
    __syncthreads();
    for(int i=0;i<SERIAL_PART;++i)
    {
        int in=baseOffset+i*blockDim.x+threadIdx.x;
        if((input[in]&bitMask)==0)
        {
            // 0-bit element: place at rank-1 in the low partition.
            output[sumGlobalS[i][threadIdx.x]-1]=input[in];
        }
        else
        {
            // 1-bit element: its position minus the number of 0-bits seen
            // so far, offset into the high partition at sumLast.
            output[sumLast+in-(sumGlobalS[i][threadIdx.x])]=input[in];
        }
    }
}
}
|
8,272 | #include <stdio.h>
#include <cuda_runtime.h>
// #include <helper_cuda.h>
#define N 1000000
// Grid-stride element-wise vector add: c = a + b over N entries.
__global__ void addVectors(int *a, int *b, int *c){
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride) {
        c[i] = a[i] + b[i];
    }
}
int main(void){
    // Adds two N-element random vectors on the GPU using managed memory
    // and prints the result.
    cudaError_t err = cudaSuccess;
    size_t size = N * sizeof(int);
    int *a, *b, *c;
    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);
    // single-digit random inputs
    for(int i = 0; i < N; i++){
        a[i] = rand() % 10;
        b[i] = rand() % 10;
    }
    size_t threads_per_block = 256;
    size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;  // ceil-div
    addVectors<<<number_of_blocks, threads_per_block>>>(a, b, c);
    err = cudaGetLastError();
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to launch kernel: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();   // wait before reading managed memory on host
    for(int i = 0; i < N; i++){
        printf("%d ", c[i]);
    }
    printf("\n");
    printf("Done\n");
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
8,273 | #include <stdio.h>
#include <memory.h>
#include <time.h>
#define htod(dst, src, size)\
cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice)
#define dtoh(dst, src, size)\
cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost)
#define dtod(dst, src, size)\
cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice)
#define ROW 8
#define COL 8
float ref[ROW][COL];
// Fills the global ref matrix with the values 0..ROW*COL-1
// (a random fill is seeded but left disabled).
void initRef()
{
    float *flat = (float*)ref;
    srand((unsigned int) time (NULL));
    for (int i = 0; i < ROW * COL; i++)
    {
        flat[i] = i;//(float)rand() / RAND_MAX;
    }
}
// Dumps a flattened ROW x COL matrix, one row per line, %9.3f per cell.
void printData(float *data)
{
    printf("printData\n");
    for (int r = 0; r < ROW; r++)
    {
        for (int c = 0; c < COL; c++)
        {
            printf("%9.3f ", data[r * COL + c]);
        }
        printf("\n");
    }
}
// Dumps a flattened r x c matrix, one row per line, %9.6f per cell.
void printData2(float *data, int r, int c)
{
    printf("printData\n");
    for (int row = 0; row < r; row++)
    {
        for (int col = 0; col < c; col++)
        {
            printf("%9.6f ", data[row * c + col]);
        }
        printf("\n");
    }
}
// Dumps the global reference matrix, one row per line, %8.6f per cell.
void printRef()
{
    printf("printRef\n");
    for (int r = 0; r < ROW; r++)
    {
        for (int c = 0; c < COL; c++)
        {
            printf("%8.6f ", ref[r][c]);
        }
        printf("\n");
    }
}
// Map pattern: adds the device-resident scalar *d_func to every element
// of the 2D grid-covered array.
__global__ void map(float *d_data, float *d_func)
{
    int width = gridDim.x * blockDim.x;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    d_data[row * width + col] += *d_func;
}
// Host driver for the map kernel: copies ref to the device, adds the
// scalar 1.5 to every element, and prints the matrix before and after.
void map()
{
    float *d_mapped, *d_func;
    float *h_mapped, h_func;
    float tmp[ROW][COL];
    size_t size = sizeof(ref);
    memcpy(tmp, ref, size);
    h_mapped = (float*)tmp;
    printf("before : ");
    printData(h_mapped);
    cudaMalloc(&d_mapped, size);
    cudaMalloc(&d_func, sizeof(float));
    h_func = 1.5;
    // fix: the original copied only 1 byte of the scalar (htod(..., 1)),
    // leaving 3 bytes of *d_func uninitialized on the device.
    htod(d_func, &h_func, sizeof(float));
    htod(d_mapped, h_mapped, size);
    int c = 2;
    dim3 blockSize = dim3(c, c);
    dim3 gridSize = dim3(COL / c, ROW / c);
    map<<<gridSize, blockSize>>>(d_mapped, d_func);
    dtoh(h_mapped, d_mapped, size);
    printf("after : ");
    printData(h_mapped);
    cudaFree(d_mapped);
    cudaFree(d_func);   // fix: d_func was leaked
}
// Writes the transpose of d_ref (height x width) into d_transposed.
__global__ void transpose(float *d_transposed, float *d_ref)
{
    int width = gridDim.x * blockDim.x;
    // fix: height must use blockDim.y — the original used blockDim.x,
    // which only worked because the launch used square blocks.
    int height = gridDim.y * blockDim.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    d_transposed[x * height + y] = d_ref[y * width + x];
}
// Host driver for the transpose kernel: transposes ref on the GPU and
// prints the matrix before and after. h_ref and h_transposed deliberately
// alias the same host staging buffer.
void transpose()
{
    float *d_transposed, *d_ref;
    float *h_transposed, *h_ref;
    float tmp[ROW][COL];
    size_t size = sizeof(ref);
    memcpy(tmp, ref, size);
    h_ref = (float*)tmp;
    h_transposed = (float*)tmp;
    printf("before : ");
    printData(h_ref);
    cudaMalloc(&d_transposed, size);
    cudaMalloc(&d_ref, size);
    htod(d_ref, h_ref, size);
    const int tile = 2;
    dim3 blockSize(tile, tile);
    dim3 gridSize(COL / tile, ROW / tile);
    transpose<<<gridSize, blockSize>>>(d_transposed, d_ref);
    dtoh(h_transposed, d_transposed, size);
    printf("after : ");
    printData(h_transposed);
    cudaFree(d_transposed);
    cudaFree(d_ref);
}
// Gather form of a 3x3 box filter: each thread averages the in-bounds
// neighbours of its own cell into d_gathered (which must be zeroed first —
// the kernel accumulates with +=).
__global__ void gather(float *d_gathered, float *d_ref)
{
    int width = gridDim.x * blockDim.x;
    // fix: height must use blockDim.y — the original used blockDim.x,
    // correct only for square blocks.
    int height = gridDim.y * blockDim.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int cnt = 0;
    for (int yy = y - 1; yy < y + 2; yy++)
    {
        for (int xx = x - 1; xx < x + 2; xx++)
        {
            if (xx >= 0 && xx < width && yy >= 0 && yy < height)
            {
                d_gathered[y * width + x] += d_ref[yy * width + xx];
                cnt++;
            }
        }
    }
    d_gathered[y * width + x] /= cnt;   // normalize by in-bounds count
}
// Host driver for the gather kernel: runs the 3x3 averaging filter over
// ref and prints before/after. h_ref and h_gathered deliberately alias
// the same host staging buffer.
void gather()
{
    float *d_gathered, *d_ref;
    float *h_gathered, *h_ref;
    float tmp[ROW][COL];
    size_t size = sizeof(ref);
    memcpy(tmp, ref, size);
    h_ref = (float*)tmp;
    h_gathered = (float*)tmp;
    printf("before : ");
    printData(h_ref);
    cudaMalloc(&d_ref, size);
    cudaMalloc(&d_gathered, size);
    htod(d_ref, h_ref, size);
    cudaMemset(d_gathered, 0, size);   // kernel accumulates, start from zero
    const int tile = 2;
    dim3 blockSize(tile, tile);
    dim3 gridSize(COL / tile, ROW / tile);
    printf("run \n");
    gather<<<gridSize, blockSize>>>(d_gathered, d_ref);
    dtoh(h_gathered, d_gathered, size);
    printf("after : ");
    printData(h_gathered);
    cudaFree(d_gathered);
    cudaFree(d_ref);
}
// Scatter form of the 3x3 box filter: each thread ADDS its own value into
// every in-bounds neighbour cell, then divides its own cell by the number
// of cells it touched.
// NOTE(review): the `+=` into neighbour cells is performed by up to 9
// threads concurrently with plain loads/stores (no atomicAdd), and the
// final divide can run while other threads are still adding into the same
// cell — results are nondeterministic. Confirm whether this demo intends
// that, or whether atomicAdd plus a separate normalization pass is needed.
__global__ void scatter(float *d_scattered, float *d_ref)
{
    int width = gridDim.x * blockDim.x;
    // NOTE(review): blockDim.x here — presumably blockDim.y was meant;
    // they agree only because the launch uses square blocks.
    int height = gridDim.y * blockDim.x;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int cnt = 0;
    for (int yy = y - 1; yy < y + 2; yy++)
    {
        for (int xx = x - 1; xx < x + 2; xx++)
        {
            if (xx >= 0 && xx < width && yy >= 0 && yy < height)
            {
                d_scattered[yy * width + xx] += d_ref[y * width + x];
                cnt++;
            }
        }
    }
    d_scattered[y * width + x] /= cnt;
}
// Host driver for the scatter kernel: scatters ref through the 3x3
// neighbourhood filter and prints before/after. h_ref and h_scattered
// deliberately alias the same host staging buffer.
void scatter()
{
    float *d_scattered, *d_ref;
    float *h_scattered, *h_ref;
    float tmp[ROW][COL];
    size_t size = sizeof(ref);
    memcpy(tmp, ref, size);
    h_ref = (float*)tmp;
    h_scattered = (float*)tmp;
    printf("before : ");
    printData(h_ref);
    cudaMalloc(&d_ref, size);
    cudaMalloc(&d_scattered, size);
    htod(d_ref, h_ref, size);
    cudaMemset(d_scattered, 0, size);   // kernel accumulates, start from zero
    const int tile = 2;
    dim3 blockSize(tile, tile);
    dim3 gridSize(COL / tile, ROW / tile);
    printf("run \n");
    scatter<<<gridSize, blockSize>>>(d_scattered, d_ref);
    dtoh(h_scattered, d_scattered, size);
    printf("after : ");
    printData(h_scattered);
    cudaFree(d_scattered);
    cudaFree(d_ref);
}
// Weighted 3x3 stencil: accumulates window-weighted in-bounds neighbours
// into d_stenciled (must be zeroed first) and normalizes by the sum of
// weights actually applied.
__global__ void stencil(float *d_stenciled, float *d_ref, float *d_window)
{
    int width = gridDim.x * blockDim.x;
    // fix: blockDim.y (was blockDim.x — correct only for square blocks)
    int height = gridDim.y * blockDim.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    float cnt = 0;
    for (int yy = y - 1, wy = 0; yy < y + 2; yy++, wy++)
    {
        for (int xx = x - 1, wx = 0; xx < x + 2; xx++, wx++)
        {
            if (xx >= 0 && xx < width && yy >= 0 && yy < height)
            {
                // fix: the window is 3x3, so its row stride is 3 — the
                // original indexed it with the image width, reading far
                // past the end of the 9-float window buffer.
                float w = d_window[wy * 3 + wx];
                d_stenciled[y * width + x] += d_ref[yy * width + xx] * w;
                cnt += w;
            }
        }
    }
    d_stenciled[y * width + x] /= cnt;
}
// Host driver for the stencil kernel: applies a 3x3 plus-shaped averaging
// window (0.2 on the center and the four edge-adjacent cells) over ref.
void stencil()
{
    float *d_stenciled, *d_ref, *d_window;
    float *h_stenciled, *h_ref, *h_window;
    float tmp[ROW][COL];
    float window[3][3];
    size_t size = sizeof(ref);
    memcpy(tmp, ref, size);
    h_ref = (float*)tmp;
    h_stenciled = (float*)tmp;
    h_window = (float*)window;
    // fix: zero the whole window first — the original left the four
    // corner weights uninitialized (garbage stack values).
    memset(window, 0, sizeof(window));
    window[0][1] = 0.2;
    window[1][0] = 0.2;
    window[1][1] = 0.2;
    window[1][2] = 0.2;
    window[2][1] = 0.2;
    printf("before : ");
    printData(h_ref);
    cudaMalloc(&d_ref, size);
    cudaMalloc(&d_stenciled, size);
    cudaMalloc(&d_window, sizeof(window));
    htod(d_ref, h_ref, size);
    htod(d_window, h_window, sizeof(window));
    cudaMemset(d_stenciled, 0, size);
    int c = 2;
    dim3 blockSize = dim3(c, c);
    dim3 gridSize = dim3(COL / c, ROW / c);
    printf("run \n");
    stencil<<<gridSize, blockSize>>>(d_stenciled, d_ref, d_window);
    dtoh(h_stenciled, d_stenciled, size);
    printf("after : ");
    printData(h_stenciled);
    cudaFree(d_stenciled);
    cudaFree(d_ref);
    cudaFree(d_window);   // fix: d_window was leaked
}
// One reduction level: each 2x2 block stages its four values in shared
// memory, then the block's (0,0) thread sums them into one cell of the
// half-resolution output (which must be zeroed — it accumulates with +=).
__global__ void reduce(float *d_reduced, float *d_ref)
{
    int width = gridDim.x * blockDim.x;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x0 = blockIdx.x * blockDim.x;   // block origin
    int y0 = blockIdx.y * blockDim.y;
    __shared__ float data[2][2];
    data[threadIdx.y][threadIdx.x] = d_ref[y * width + x];
    // fix: barrier so all four shared-memory stores are visible before
    // thread (0,0) reads them — the original raced.
    __syncthreads();
    if (y0 == y && x0 == x)
    {
        for (int i = 0; i < 4; i++)
        {
            d_reduced[y0/2 * (width/2) + x0/2] += ((float*)data)[i];
        }
    }
}
// Host driver: iteratively halves the matrix with 2x2 block sums until a
// single value (the grand total) remains, printing each level.
void reduce()
{
    float *d_reduced, *d_ref;
    float *h_reduced, *h_ref;
    float tmp[ROW][COL];
    size_t size = sizeof(ref);
    memcpy(tmp, ref, size);
    h_ref = (float*)tmp;
    // fix: `size` is already a byte count — the original allocated
    // sizeof(float)*size bytes (4x too much).
    h_reduced = (float*) malloc (size);
    printf("before : ");
    printData(h_ref);
    cudaMalloc(&d_ref, size);
    cudaMalloc(&d_reduced, size/4);
    htod(d_ref, h_ref, size);
    cudaMemset(d_reduced, 0, size/4);
    for (int i = ROW/2; i > 0; i /= 2)
    {
        dim3 blockSize = dim3(2, 2);
        dim3 gridSize = dim3(i, i);
        reduce<<<gridSize, blockSize>>>(d_reduced, d_ref);
        printf("before : ");
        free(h_reduced);
        h_reduced = (float*) malloc (size/4);
        dtoh(h_reduced, d_reduced, size/4);
        printData2(h_reduced, i, i);
        if (i == 1) break;
        // shrink the working buffers for the next level
        size /= 4;
        cudaFree(d_ref);
        cudaMalloc(&d_ref, size);
        dtod(d_ref, d_reduced, size);
        cudaFree(d_reduced);
        cudaMalloc(&d_reduced, size/4);
        cudaMemset(d_reduced, 0, size/4);
    }
    // fix: copy the whole final float — the original copied only 1 byte,
    // so the printed total was mostly stale host memory.
    dtoh(h_reduced, d_reduced, sizeof(float));
    printf("%f\n", *h_reduced);
    free(h_reduced);
    cudaFree(d_reduced);
    cudaFree(d_ref);
}
// One Hillis-Steele inclusive-scan step over the flattened 2D launch:
// out[i] = in[i] + in[i-step], with a pass-through for i < step.
__global__ void hills_steele(float *d_scaned, float *d_ref, int step)
{
    int width = gridDim.x * blockDim.x;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = y * width + x;
    d_scaned[idx] = (idx < step) ? d_ref[idx]
                                 : d_ref[idx] + d_ref[idx - step];
}
// step efficiency, inclusive
// Runs a Hillis-Steele inclusive scan over the flattened ref matrix,
// ping-ponging between d_ref and d_scaned with one kernel per doubling
// step, then prints the scanned result.
void Hills_Steele_scan()
{
    float *d_scaned, *d_ref;
    float *h_scaned, *h_ref;
    float tmp[ROW][COL];
    size_t size = sizeof(ref);
    const int count = ROW * COL;   // number of elements (size is bytes)
    memcpy(tmp, ref, size);
    h_ref = (float*)tmp;
    // fix: `size` is already in bytes — the original allocated
    // sizeof(float)*size (4x too much).
    h_scaned = (float*) malloc (size);
    printf("before : \n");
    printData(h_ref);
    cudaMalloc(&d_ref, size);
    cudaMalloc(&d_scaned, size);
    htod(d_ref, h_ref, size);
    cudaMemset(d_scaned, 0, size);
    // fix: iterate over the element count, not the byte count — the
    // original's extra passes (step >= count) were identity copies that
    // produced the same result but wasted kernel launches.
    for (int i = 1; i < count; i *= 2)
    {
        dim3 blockSize = dim3(2, 2);
        dim3 gridSize = dim3(ROW/2, COL/2);
        hills_steele<<<gridSize, blockSize>>>(d_scaned, d_ref, i);
        dtod(d_ref, d_scaned, size);   // ping-pong for the next step
    }
    dtoh(h_scaned, d_scaned, size);
    printData(h_scaned);
    cudaFree(d_ref);
    cudaFree(d_scaned);
    free(h_scaned);
}
// Blelloch up-sweep step: the last index of each step-sized group absorbs
// the partial sum from step/2 below it; every other element passes through.
__global__ void blelloch(float *d_scaned, float *d_ref, int step)
{
    int width = gridDim.x * blockDim.x;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = y * width + x;
    bool groupTail = (idx % step) == (step - 1);
    d_scaned[idx] = groupTail ? d_ref[idx] + d_ref[idx - (step/2)]
                              : d_ref[idx];
}
// Blelloch down-sweep step: at each step-sized group's last index, push
// the running prefix down to the step/2 position and combine upward.
// Non-tail elements are left untouched.
__global__ void blelloch_post(float *d_scaned, float *d_ref, int step)
{
    int width = gridDim.x * blockDim.x;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = y * width + x;
    if ((idx % step) != (step - 1)) return;
    d_scaned[idx] = d_ref[idx] + d_ref[idx - (step/2)];
    d_scaned[idx - (step/2)] = d_ref[idx];
}
// work efficiency, exclusive
// Host driver for the Blelloch (work-efficient, exclusive) scan over the
// global ROW x COL `ref` array: up-sweep, zero the last element, then
// down-sweep, printing the intermediate array after every step.
void Blelloch_scan()
{
float *d_scaned, *d_ref;
float *h_scaned, *h_ref;
float tmp[ROW][COL];
size_t size = sizeof(ref);
memcpy(tmp, ref, size);
h_ref = (float*)tmp;
// NOTE(review): `size` is already a byte count, so this over-allocates by a
// factor of sizeof(float) — harmless but oversized.
h_scaned = (float*) malloc (sizeof(float) * size);
printData(h_ref);
cudaMalloc(&d_ref, size);
cudaMalloc(&d_scaned, size);
htod(d_ref, h_ref, size);
cudaMemset(d_scaned, 0, size);
dim3 blockSize = dim3(2, 2);
dim3 gridSize = dim3(ROW/2, COL/2);
// Up-sweep: build partial sums at power-of-two segment boundaries.
for (int i = 2; i < ROW * COL; i *= 2)
{
blelloch<<<gridSize, blockSize>>>(d_scaned, d_ref, i);
dtoh(h_scaned, d_scaned, size);
printData(h_scaned);
dtod(d_ref, d_scaned, size);
}
// Clear the root (last element) to make the scan exclusive.
dtoh(h_ref, d_scaned, size);
h_ref[(size/sizeof(float))-1] = 0;
htod(d_ref, h_ref, size);
dtod(d_scaned, d_ref, size);
// Down-sweep: propagate partial sums back to every position.
for (int i = ROW * COL; i > 1; i /= 2)
{
blelloch_post<<<gridSize, blockSize>>>(d_scaned, d_ref, i);
dtoh(h_scaned, d_scaned, size);
printData(h_scaned);
dtod(d_ref, d_scaned, size);
}
cudaFree(d_ref);
cudaFree(d_scaned);
free(h_scaned);
}
// Predicate map: writes `val` where (input & bits) is non-zero, 0 elsewhere.
// 1D launch with no bounds guard — the grid must exactly cover the array.
__global__ void mapBits(
unsigned int* d_ref,
unsigned int* d_inputVals,
unsigned int bits,
unsigned int val)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const bool bitSet = (d_inputVals[i] & bits) != 0;
    d_ref[i] = bitSet ? val : 0;
}
// Inverse predicate map: writes `val` where (input & bits) is ZERO, 0 where
// the bit is set. 1D launch; the grid must exactly cover the array.
__global__ void mapBitsRev(
unsigned int* d_ref,
unsigned int* d_inputVals,
unsigned int bits,
unsigned int val)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const bool bitSet = (d_inputVals[i] & bits) != 0;
    d_ref[i] = bitSet ? 0 : val;
}
// One Hillis–Steele inclusive-scan step over a flat uint array (1D launch):
// adds the element `step` positions back, or copies through near the front.
__global__ void sumScan(
unsigned int *d_scanned,
unsigned int *d_ref,
int step)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    d_scanned[i] = (i < step)
        ? d_ref[i]
        : d_ref[i] + d_ref[i - step];
}
// Up-sweep step of a Blelloch scan on a flat uint array (1D launch variant
// of the float/2D kernel above).
__global__ void blelloch(
unsigned int *d_scanned,
unsigned int *d_ref,
int step)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const bool atSegmentEnd = (i % step) == (step - 1);
    d_scanned[i] = atSegmentEnd
        ? d_ref[i] + d_ref[i - (step / 2)]
        : d_ref[i];
}
// Down-sweep step of a Blelloch scan on a flat uint array: at each
// `step`-segment end, left child gets the parent's value and the parent
// accumulates the left child. Other elements of d_scanned are untouched.
__global__ void blellochPost(
unsigned int *d_scanned,
unsigned int *d_ref,
int step)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if ((i % step) != (step - 1))
        return;
    const int half = step / 2;
    d_scanned[i] = d_ref[i] + d_ref[i - half];
    d_scanned[i - half] = d_ref[i];
}
// Picks, per element, the within-bucket offset that matches its mapped bit:
// ones go through the ones-scan, zeros through the zeros-scan.
__global__ void getOffset(
unsigned int *d_offset,
unsigned int *d_0sum,
unsigned int *d_1sum,
unsigned int *d_map)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    d_offset[i] = (d_map[i] == 1) ? d_1sum[i] : d_0sum[i];
}
// Final scatter destination = within-bucket offset + bucket base address
// (the histogram prefix for the element's bucket).
__global__ void getNewIndices(
unsigned int *d_newIndices,
unsigned int *d_offset,
unsigned int *d_histPSum)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    d_newIndices[i] = d_offset[i] + d_histPSum[i];
}
// Number of elements in the demo array to sort.
#define numberOfSort 16
// NOTE(review): used below as h_sumScaned[num-1] when deriving the histogram
// base; the last element of the scan would be index numberOfSort-1 (15), not
// 13 — TODO confirm this offset is intentional.
#define num 14
// Demonstrates one pass of an LSD radix sort over a fixed 16-element array:
// maps the current bit, scans the map (Hillis–Steele and Blelloch), derives
// per-element scatter offsets, and prints them.
// NOTE(review): the trailing `break` limits the loop to bit 0 only, and none
// of the cudaMalloc/malloc buffers are released (one-shot demo code).
void radixSort()
{
const unsigned int h_input[16] = {
5, 10, 0, 123, 12, 3, 11, 99, 11, 20, 2, 133, 27, 67, 0, 0
};
unsigned int* d_ref;
unsigned int* d_map;
unsigned int* d_sumScaned;
unsigned int* d_histPSum;
unsigned int* d_0sum;
unsigned int* d_1sum;
unsigned int* d_offset;
unsigned int* d_newIndices;
cudaMalloc(&d_ref, sizeof(unsigned int) * numberOfSort);
cudaMalloc(&d_map, sizeof(unsigned int) * numberOfSort);
cudaMalloc(&d_sumScaned, sizeof(unsigned int) * numberOfSort);
cudaMalloc(&d_histPSum, sizeof(unsigned int) * numberOfSort);
cudaMalloc(&d_0sum, sizeof(unsigned int) * numberOfSort);
cudaMalloc(&d_1sum, sizeof(unsigned int) * numberOfSort);
cudaMalloc(&d_offset, sizeof(unsigned int) * numberOfSort);
cudaMalloc(&d_newIndices, sizeof(unsigned int) * numberOfSort);
unsigned int* h_ref = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
unsigned int* h_map = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
unsigned int* h_sumScaned = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
unsigned int* h_histPSum = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
unsigned int* h_0sum = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
unsigned int* h_1sum = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
unsigned int* h_offset = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
unsigned int* h_newIndices = (unsigned int*) malloc (sizeof(unsigned int) * numberOfSort);
// 4 blocks x 4 threads = exactly numberOfSort threads (kernels are unguarded).
dim3 gridSize = dim3(4);
dim3 blockSize = dim3(4);
for (unsigned int bits = 0; bits < 32; bits++)
{
// Map: 1 where the current bit is set, 0 elsewhere.
memcpy(h_ref, h_input, sizeof(unsigned int) * numberOfSort);
cudaMemcpy(d_ref, h_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyHostToDevice);
mapBits<<<gridSize, blockSize>>>(d_map, d_ref, 1 << bits, 1);
cudaMemcpy(h_sumScaned, d_map,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
for (int i = 0; i < numberOfSort; i++)
{
printf("%u\n", h_sumScaned[i]);
}
cudaMemcpy(d_ref, d_map,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
// sum scan
for (unsigned int i = 1; i <= numberOfSort; i*=2)
{
sumScan<<<gridSize, blockSize>>>(d_sumScaned, d_ref, i);
cudaMemcpy(d_ref, d_sumScaned,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(h_sumScaned, d_sumScaned,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
// Bucket base for ones = count of zeros = numberOfSort - (count of ones).
cudaMemcpy(d_ref, h_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyHostToDevice);
mapBits<<<gridSize, blockSize>>>(
d_histPSum, d_ref, 1 << bits, numberOfSort - h_sumScaned[num-1]);
cudaMemcpy(h_histPSum, d_histPSum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
// 0 sum
memcpy(h_ref, h_input, sizeof(unsigned int) * numberOfSort);
cudaMemcpy(d_ref, h_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyHostToDevice);
mapBitsRev<<<gridSize, blockSize>>>(d_ref, d_ref, (1<<bits), 1);
cudaMemset(d_0sum, 0, sizeof(unsigned int) * numberOfSort);
// Blelloch up-sweep over the zeros map.
for (int i = 2; i < numberOfSort ; i *= 2)
{
blelloch<<<gridSize, blockSize>>>(d_0sum, d_ref, i);
cudaMemcpy(d_ref, d_0sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
}
// Zero the root, then down-sweep to get the exclusive scan.
cudaMemcpy(h_ref, d_0sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
h_ref[numberOfSort-1] = 0;
cudaMemcpy(d_ref, h_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyHostToDevice);
cudaMemcpy(d_0sum, d_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
for (int i = numberOfSort; i > 1; i /= 2)
{
blellochPost<<<gridSize, blockSize>>>(d_0sum, d_ref, i);
cudaMemcpy(d_ref, d_0sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(h_0sum, d_0sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
// 1 sum
memcpy(h_ref, h_input, sizeof(unsigned int) * numberOfSort);
cudaMemcpy(d_ref, h_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyHostToDevice);
mapBits<<<gridSize, blockSize>>>(d_ref, d_ref, (1<<bits), 1);
cudaMemset(d_1sum, 0, sizeof(unsigned int) * numberOfSort);
for (int i = 2; i < numberOfSort ; i *= 2)
{
blelloch<<<gridSize, blockSize>>>(d_1sum, d_ref, i);
cudaMemcpy(d_ref, d_1sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(h_ref, d_1sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
h_ref[numberOfSort-1] = 0;
cudaMemcpy(d_ref, h_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyHostToDevice);
cudaMemcpy(d_1sum, d_ref,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
for (int i = numberOfSort; i > 1; i /= 2)
{
blellochPost<<<gridSize, blockSize>>>(d_1sum, d_ref, i);
cudaMemcpy(d_ref, d_1sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(h_1sum, d_1sum,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
// offset
getOffset<<<gridSize, blockSize>>>(d_offset, d_0sum, d_1sum, d_map);
cudaMemcpy(h_offset, d_offset,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
// new Indices
getNewIndices<<<gridSize, blockSize>>>(d_newIndices, d_offset, d_histPSum);
cudaMemcpy(h_newIndices, d_newIndices,
sizeof(unsigned int) * numberOfSort, cudaMemcpyDeviceToHost);
printf("\n");
for (int i = 0; i < numberOfSort; i++)
{
printf("%u %u %u\n",
h_offset[i], h_histPSum[i], h_newIndices[i]);
}
// NOTE(review): only the first bit is processed because of this break.
break;
}
}
// Entry point: earlier exercises are kept available but disabled; only the
// radix-sort demo currently runs.
int main()
{
//initRef();
//map();
//transpose();
//gather();
//scatter();
//stencil();
//reduce();
//Hills_Steele_scan();
//Blelloch_scan();
radixSort();
return 0;
}
|
8,274 | #include <iostream>
#include "../include/gstack.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
// Exercises gpu_stl::stack<float> from a single device thread, recording
// empty()/size() (and top() while draining) into `output` for host-side
// inspection. The sentinel 10086 separates the three phases.
__global__ void test(float *output){
    gpu_stl::stack<float> stk;
    int pos = 0;
    output[pos++] = stk.empty();
    output[pos++] = stk.size();
    output[pos++] = 10086;
    for (int i = 1; i <= 20; ++i) {
        stk.push(i*1.7);
        output[pos++] = stk.empty();
        output[pos++] = stk.size();
    }
    output[pos++] = 10086;
    while (!stk.empty()) {
        output[pos++] = stk.empty();
        output[pos++] = stk.size();
        output[pos++] = stk.top();
        stk.pop();
    }
}
// Runs the stack test on one thread and dumps the 120-slot trace buffer.
int main(){
    def_dvec(float) dev_out(120, 0);
    test<<<1, 1>>>(to_ptr(dev_out));
    for (auto value : dev_out)
        cout << value << ' ';
    cout << endl;
    return 0;
}
|
8,275 | /**
* @file exclusive_prefix_sum.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
/**
 * One step of an iterative exclusive prefix sum (Hillis–Steele style).
 * Call with distance == 0 first (shifts the input right by one to make the
 * scan exclusive), then with distance = 1, 2, 4, ... ping-ponging the
 * old/new buffers on the host side.
 *
 * @param oldSum      input partial sums from the previous step
 * @param newSum      output partial sums for this step
 * @param distance    0 for the initial shift, else the add-back stride
 * @param numElements number of valid elements in both buffers
 */
__global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) {
    // Get Thread ID (2D grid flattened; each thread strides over the array)
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    // BUG FIX: the loop previously ran with `curTID <= numElements`, writing
    // one element past the end of newSum (out-of-bounds). Valid indices are
    // [0, numElements).
    for (int curTID = FIRST_T_ID; curTID < numElements; curTID += NUM_THREADS) {
        if (distance == 0) {
            // Initial step: shift right by one so the scan becomes exclusive.
            if (curTID == 0) {
                newSum[curTID] = 0;
            } else {
                newSum[curTID] = oldSum[curTID-1];
            }
        } else {
            // Clamp the partner index at 0; oldSum[0] is 0 after the shift
            // step, so adding it is a no-op for the front elements.
            const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;
            newSum[curTID] = oldSum[curTID] + oldSum[COMPARE_T_ID];
        }
    }
}
|
8,276 |
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <cmath>
#include <chrono>
using namespace std::chrono;
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
float normaAvg[32];
float avg[32][3];
// CPU reference: for each class, computes the per-channel products of the
// pixel with the class-average colour, normalised by the product of the two
// vector norms, and stores three times the channel sum in res[curClass]
// (the inner loop adds the full channel sum once per channel slot).
void CustomFormulaComputation(float* res, int numClasses, uchar4 curPixel)
{
    float rgb[3];
    float tmp[3];
    float sum, denominator;
    const float normaPix = sqrt((float)(curPixel.x * curPixel.x + curPixel.y * curPixel.y + curPixel.z * curPixel.z));
    for (int c = 0; c < numClasses; ++c)
    {
        rgb[0] = curPixel.x * avg[c][0];
        rgb[1] = curPixel.y * avg[c][1];
        rgb[2] = curPixel.z * avg[c][2];
        denominator = normaPix * normaAvg[c];
        rgb[0] /= denominator;
        rgb[1] /= denominator;
        rgb[2] /= denominator;
        // Each of the three tmp slots accumulates the complete channel sum,
        // so the result is 3 * (rgb[0] + rgb[1] + rgb[2]).
        sum = 0.0;
        for (int i = 0; i < 3; ++i) tmp[i] = 0.0;
        for (int i = 0; i < 3; ++i)
        {
            tmp[i] += rgb[0];
            tmp[i] += rgb[1];
            tmp[i] += rgb[2];
            sum += tmp[i];
        }
        res[c] = sum;
    }
}
// Returns the index of the largest value in arr[0..numClasses-1]; ties keep
// the earliest index (strict `>` comparison).
int CustomArgMax(float* arr, int numClasses)
{
    int best = 0;
    for (int i = 1; i < numClasses; ++i)
    {
        if (arr[i] > arr[best])
            best = i;
    }
    return best;
}
// CPU reference classifier: overwrites each pixel's .w channel with the
// argmax class of the spectral-angle score against the class averages.
void CustomSpectralAngleMethod(uchar4* pixels, int width, int height, int numClasses)
{
    float scores[32];
    for (int x = 0; x < width; ++x)
    {
        for (int y = 0; y < height; ++y)
        {
            const int idx = y * width + x;
            uchar4 curPixel = pixels[idx];
            CustomFormulaComputation(scores, numClasses, curPixel);
            pixels[idx].w = CustomArgMax(scores, numClasses);
        }
    }
}
__constant__ float constAVG[32][3];
__constant__ float constNormaAVG[32];
// Device twin of CustomFormulaComputation, reading the class averages from
// constant memory (constAVG / constNormaAVG) instead of host globals.
// Stores three times the normalised channel sum per class in res.
__device__ void FormulaComputation(float* res, int numClasses, uchar4 curPixel)
{
    float rgb[3];
    float tmp[3];
    float sum, denominator;
    const float normaPix = sqrt((float)(curPixel.x * curPixel.x + curPixel.y * curPixel.y + curPixel.z * curPixel.z));
    for (int c = 0; c < numClasses; ++c)
    {
        rgb[0] = curPixel.x * constAVG[c][0];
        rgb[1] = curPixel.y * constAVG[c][1];
        rgb[2] = curPixel.z * constAVG[c][2];
        denominator = normaPix * constNormaAVG[c];
        rgb[0] /= denominator;
        rgb[1] /= denominator;
        rgb[2] /= denominator;
        // Each tmp slot accumulates the full channel sum, so the result is
        // 3 * (rgb[0] + rgb[1] + rgb[2]) — mirrors the CPU reference.
        sum = 0.0;
        for (int i = 0; i < 3; ++i) tmp[i] = 0.0;
        for (int i = 0; i < 3; ++i)
        {
            tmp[i] += rgb[0];
            tmp[i] += rgb[1];
            tmp[i] += rgb[2];
            sum += tmp[i];
        }
        res[c] = sum;
    }
}
// Device argmax over arr[0..numClasses-1]; ties keep the earliest index.
__device__ int ArgMax(float* arr, int numClasses)
{
    int best = 0;
    for (int i = 1; i < numClasses; ++i)
    {
        if (arr[i] > arr[best])
            best = i;
    }
    return best;
}
// GPU classifier: grid-stride loops over both image dimensions; each thread
// labels pixel.w with the argmax spectral-angle score computed against the
// constant-memory class averages.
__global__ void SpectralAngleMethod(uchar4* pixels, int width, int height, int numClasses)
{
    const int startX = blockDim.x * blockIdx.x + threadIdx.x;
    const int startY = blockDim.y * blockIdx.y + threadIdx.y;
    const int strideX = blockDim.x * gridDim.x;
    const int strideY = blockDim.y * gridDim.y;
    float scores[32];
    for (int x = startX; x < width; x += strideX)
    {
        for (int y = startY; y < height; y += strideY)
        {
            const int idx = y * width + x;
            uchar4 curPixel = pixels[idx];
            FormulaComputation(scores, numClasses, curPixel);
            pixels[idx].w = ArgMax(scores, numClasses);
        }
    }
}
// Reads a binary image (width, height, then uchar4 pixels), computes per-class
// average colours from user-supplied sample coordinates, classifies every
// pixel on CPU (timed) and GPU (timed with events), and writes the GPU result
// back out. Input/output file names and samples come from stdin.
int main(int argc, const char* argv[])
{
std::string input, output;
int width, height, numClasses, numPixels;
uchar4* pixels;
std::cin >> input >> output >> numClasses;
int2 coordinate;
std::vector<std::vector<int2>> samples(numClasses);
for (int i = 0; i < numClasses; ++i)
{
std::cin >> numPixels;
for (int j = 0; j < numPixels; ++j)
{
std::cin >> coordinate.x >> coordinate.y;
samples[i].emplace_back(coordinate);
}
}
FILE* file;
if ((file = fopen(input.c_str(), "rb")) == NULL)
{
std::cerr << "ERROR: something wrong with opening the file!\n";
exit(0);
}
else
{
// NOTE(review): fread return values are unchecked — a truncated file
// leaves width/height/pixels partially uninitialised.
fread(&width, sizeof(int), 1, file);
fread(&height, sizeof(int), 1, file);
if (width * height > 400000000)
{
std::cerr << "ERROR: incorrect input.\n";
exit(0);
}
pixels = new uchar4[width * height];
fread(pixels, sizeof(uchar4), width * height, file);
fclose(file);
}
int numChannels = 3; // rgb
int maxElems = 32;
// Per-class mean colour over the supplied sample coordinates.
for (int i = 0; i < numClasses; ++i)
{
avg[i][0] = 0.0;
avg[i][1] = 0.0;
avg[i][2] = 0.0;
numPixels = samples[i].size();
for (int j = 0; j < numPixels; ++j)
{
coordinate.x = samples[i][j].x;
coordinate.y = samples[i][j].y;
avg[i][0] += pixels[coordinate.y * width + coordinate.x].x;
avg[i][1] += pixels[coordinate.y * width + coordinate.x].y;
avg[i][2] += pixels[coordinate.y * width + coordinate.x].z;
}
avg[i][0] /= numPixels;
avg[i][1] /= numPixels;
avg[i][2] /= numPixels;
}
for (int i = 0; i < numClasses; ++i)
{
normaAvg[i] = std::sqrt(avg[i][0] * avg[i][0] + avg[i][1] * avg[i][1] + avg[i][2] * avg[i][2]);
}
// Full 32-entry tables are uploaded; only the first numClasses are used.
CSC(cudaMemcpyToSymbol(constAVG, avg, sizeof(float) * maxElems * numChannels));
CSC(cudaMemcpyToSymbol(constNormaAVG, normaAvg, sizeof(float) * maxElems));
uchar4* deviceRes;
CSC(cudaMalloc(&deviceRes, sizeof(uchar4) * width * height));
CSC(cudaMemcpy(deviceRes, pixels, sizeof(uchar4) * width * height, cudaMemcpyHostToDevice));
auto startt = steady_clock::now();
//custom
// CPU reference run (labels the host copy; later overwritten by GPU result).
CustomSpectralAngleMethod(pixels, width, height, numClasses);
auto endd = steady_clock::now();
std::cout << "CPU" << std::endl;
std::cout << "time = " << ((double)duration_cast<microseconds>(endd - startt).count()) / 1000.0 << std::endl;
int xThreadCount = 32;
int yThreadCount = 32;
int xBlockCount = 32;
int yBlockCount = 32;
dim3 blockCount = dim3(xBlockCount, yBlockCount);
dim3 threadsCount = dim3(xThreadCount, yThreadCount);
cudaEvent_t start, end;
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&end));
CSC(cudaEventRecord(start));
SpectralAngleMethod<<<blockCount, threadsCount>>>(deviceRes, width, height, numClasses);
CSC(cudaGetLastError());
CSC(cudaEventRecord(end));
CSC(cudaEventSynchronize(end));
float t;
CSC(cudaEventElapsedTime(&t, start, end));
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(end));
printf("GPU\n");
printf("time = %f\n", t);
printf("blocks = (%d, %d)\n", xBlockCount, yBlockCount);
printf("threads = (%d, %d)\n", xThreadCount, yThreadCount);
// The GPU result replaces the CPU labels before writing the output file.
CSC(cudaMemcpy(pixels, deviceRes, sizeof(uchar4) * width * height, cudaMemcpyDeviceToHost));
if ((file = fopen(output.c_str(), "wb")) == NULL)
{
std::cerr << "ERROR: something wrong with opening the file.";
exit(0);
}
else
{
fwrite(&width, sizeof(int), 1, file);
fwrite(&height, sizeof(int), 1, file);
fwrite(pixels, sizeof(uchar4), width * height, file);
fclose(file);
}
CSC(cudaFree(deviceRes));
delete[] pixels;
return 0;
} |
8,277 | #include "includes.h"
// Block-local Kogge–Stone-style scan of one uint per thread, held in the
// second half of a 512-entry shared array; the zeroed first half provides
// identity values so `idx - offset` never reads out of bounds. Returns the
// EXCLUSIVE prefix for this thread (s_data[idx-1]).
// Assumes blockDim.x == 256 (512-entry shared buffer, offsets up to 128).
// NOTE(review): `0.0f` is assigned to a uint slot — it converts to 0, but the
// literal should be integral.
__device__ uint scanLocalMem(const uint val, uint* s_data)
{
// Shared mem is 512 uints long, set first half to 0
int idx = threadIdx.x;
s_data[idx] = 0.0f;
__syncthreads();
// Set 2nd half to thread local sum (sum of the 4 elems from global mem)
idx += blockDim.x; // += 256
// Some of these __sync's are unnecessary due to warp synchronous
// execution. Right now these are left in to be consistent with
// opencl version, since that has to execute on platforms where
// thread groups are not synchronous (i.e. CPUs)
uint t;
s_data[idx] = val; __syncthreads();
t = s_data[idx - 1]; __syncthreads();
s_data[idx] += t; __syncthreads();
t = s_data[idx - 2]; __syncthreads();
s_data[idx] += t; __syncthreads();
t = s_data[idx - 4]; __syncthreads();
s_data[idx] += t; __syncthreads();
t = s_data[idx - 8]; __syncthreads();
s_data[idx] += t; __syncthreads();
t = s_data[idx - 16]; __syncthreads();
s_data[idx] += t; __syncthreads();
t = s_data[idx - 32]; __syncthreads();
s_data[idx] += t; __syncthreads();
t = s_data[idx - 64]; __syncthreads();
s_data[idx] += t; __syncthreads();
t = s_data[idx - 128]; __syncthreads();
s_data[idx] += t; __syncthreads();
return s_data[idx-1];
}
// Block-wise inclusive scan: each thread serially scans 4 consecutive uints
// (read as one uint4 when in range), the per-thread totals are scanned with
// scanLocalMem (which returns the exclusive prefix), and the 4 outputs are
// offset by that prefix. When storeSum is set, the last thread writes the
// block total to g_blockSums for a later top-level scan.
// Assumes blockDim.x == 256 (shared buffer of 512 matches scanLocalMem).
// NOTE(review): the 0.0f fall-back literals convert to uint 0 — harmless,
// but the literals should be integral.
__global__ void scan(uint *g_odata, uint* g_idata, uint* g_blockSums, const int n, const bool fullBlock, const bool storeSum)
{
__shared__ uint s_data[512];
// Load data into shared mem
uint4 tempData;
uint4 threadScanT;
uint res;
uint4* inData = (uint4*) g_idata;
const int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
const int tid = threadIdx.x;
const int i = gid * 4;
// If possible, read from global mem in a uint4 chunk
if (fullBlock || i + 3 < n)
{
// scan the 4 elems read in from global
tempData = inData[gid];
threadScanT.x = tempData.x;
threadScanT.y = tempData.y + threadScanT.x;
threadScanT.z = tempData.z + threadScanT.y;
threadScanT.w = tempData.w + threadScanT.z;
res = threadScanT.w;
}
else
{ // if not, read individual uints, scan & store in lmem
threadScanT.x = (i < n) ? g_idata[i] : 0.0f;
threadScanT.y = ((i+1 < n) ? g_idata[i+1] : 0.0f) + threadScanT.x;
threadScanT.z = ((i+2 < n) ? g_idata[i+2] : 0.0f) + threadScanT.y;
threadScanT.w = ((i+3 < n) ? g_idata[i+3] : 0.0f) + threadScanT.z;
res = threadScanT.w;
}
// Exclusive prefix of the per-thread totals across the block.
res = scanLocalMem(res, s_data);
__syncthreads();
// If we have to store the sum for the block, have the last work item
// in the block write it out
if (storeSum && tid == blockDim.x-1) {
g_blockSums[blockIdx.x] = res + threadScanT.w;
}
// write results to global memory
uint4* outData = (uint4*) g_odata;
tempData.x = res;
tempData.y = res + threadScanT.x;
tempData.z = res + threadScanT.y;
tempData.w = res + threadScanT.z;
if (fullBlock || i + 3 < n)
{
outData[gid] = tempData;
}
else
{
if ( i < n) { g_odata[i] = tempData.x;
if ((i+1) < n) { g_odata[i+1] = tempData.y;
if ((i+2) < n) { g_odata[i+2] = tempData.z; } } }
}
} |
8,278 | #include<stdio.h>
// Warp-divergence demo: even global IDs set a=100, odd IDs set b=200, so
// every element of d_a receives either 100.0 or 200.0.
__global__ void kernal1(float* d_a){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    float a = 0.0;
    float b = 0.0;
    if (gid % 2 == 0) {
        a = 100.0;
    } else {
        b = 200.0;
    }
    d_a[gid] = a + b;
}
// Launches the divergence demo over 2^16 floats (256 threads per block).
int main(int argc, char** argv){
    int const n = 1 << 16;
    int mSize = n * sizeof(float);
    float* d_a;
    cudaMalloc((void**)&d_a, mSize);
    int xBlock = 256;
    kernal1<<<n / xBlock, xBlock>>>(d_a);
    cudaDeviceSynchronize();
    // FIX: release the device allocation — it was previously leaked.
    cudaFree(d_a);
    return 0;
}
|
8,279 | typedef struct {
int nelm;
double dval;
double* arra;
double* arrb;
double* arrc;
} custom;
// Element-wise kernel: arrc[i] = arra[i] + arrb[i] + dval for i < nelm.
// `ctm` and the arrays it points to must reside in device memory.
__global__ void cuda_structop(custom *ctm) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= ctm->nelm)
        return;
    ctm->arrc[idx] = ctm->arra[idx] + ctm->arrb[idx] + ctm->dval;
}
// C-callable launcher: runs cuda_structop over ctm->nelm elements using
// `nthread` threads per block. `ctm` is the HOST copy (used only for sizing);
// `gp` is the matching DEVICE copy passed to the kernel.
extern "C" void structop(int nthread, custom *ctm, void *gp) {
    // ceil-divide so a partial final block still covers all elements
    int nblock = (ctm->nelm + nthread - 1) / nthread;
    cuda_structop<<<nblock, nthread>>>((custom *)gp);
    // FIX: cudaThreadSynchronize() has been deprecated since CUDA 4.0;
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
}
|
8,280 | #include<stdio.h>
#include<cuda_runtime.h>
#include<stdlib.h>
// Shared-memory tree reduction: each block sums blockDim.x inputs and writes
// its partial sum to d_out[blockIdx.x]. Launch with dynamic shared memory of
// blockDim.x * sizeof(float); blockDim.x must be a power of two.
__global__ void reduce_kernel(float *d_out, float *d_in)
{
    // Size of the buffer is set by the third kernel-launch parameter.
    extern __shared__ float partial[];
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    const int tid = threadIdx.x;
    partial[tid] = d_in[gid];
    // Ensure the whole tile is loaded before any thread starts combining.
    __syncthreads();
    for (int active = blockDim.x / 2; active > 0; active /= 2)
    {
        if (tid < active)
            partial[tid] += partial[tid + active];
        __syncthreads();
    }
    // Thread 0 publishes the block's total.
    if (tid == 0)
        d_out[blockIdx.x] = partial[0];
}
// Two-pass reduction driver: pass 1 reduces each 256-element block into
// d_intermediate; pass 2 reduces those block sums into d_out[0].
// Assumes array_size is a multiple of 256 and that the resulting block count
// is itself a power of two (it becomes the thread count of pass 2, so it must
// also be <= the device's max threads per block).
void reduce(float *d_in,float *d_intermediate,float *d_out,int array_size)
{
int threads = 256;
int blocks = array_size/threads;
reduce_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_intermediate,d_in);
//Results of all blocks are stored in one block, which has to be reduced
threads = blocks;
blocks = 1;
reduce_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_out,d_intermediate);
}
// Sums 0..1023 on the GPU via the two-pass reduction, timing it with CUDA
// events, and prints the result (expected 523776).
int main()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if(!deviceCount){
fprintf(stderr,"No devices supporting cuda\n");
exit(EXIT_FAILURE);
}
int deviceId = 0;
cudaSetDevice(deviceId);
const int ARRAY_SIZE = 1024;
const int ARRAY_BYTES = ARRAY_SIZE*sizeof(float);
float h_in[ARRAY_SIZE];
int i;
for(i=0;i<ARRAY_SIZE;i++)
h_in[i]=i;
float *d_in,*d_out,*d_intermediate;
cudaMalloc((void**)&d_in,ARRAY_BYTES);
cudaMalloc((void**)&d_intermediate,ARRAY_BYTES);
cudaMalloc((void**)&d_out,sizeof(float));
cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Launch the reduce kernel
cudaEventRecord(start,0);
reduce(d_in,d_intermediate,d_out,ARRAY_SIZE);
cudaEventRecord(stop,0);
// Blocks until the stop event (and hence both kernels) has completed.
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
float h_out;
// Blocking copy — also guarantees the result is ready before printing.
cudaMemcpy(&h_out,d_out,sizeof(float),cudaMemcpyDeviceToHost);
printf("Sum of all array elements is %f\nElapsed time is %f\n",h_out,elapsedTime);
// NOTE(review): the two cudaEvent_t objects are never destroyed — minor
// leak at program exit.
cudaFree(d_in);
cudaFree(d_intermediate);
cudaFree(d_out);
return 0;
} |
8,281 | #define SIZE 10000
namespace {
// We need this global variable to ensure that nvcc does not optimize away the
// operations inside flop_test().
// NOTE(review): every thread read-modify-writes `accum` without atomics, so
// its final value is unspecified — acceptable here because only the emitted
// FLOPs matter, not the result.
__device__ float accum = 0.;
// Performs SIZE dependent float additions per thread (fully unrolled) purely
// to generate arithmetic work for the diagnostic.
__global__ void flop_test() {
float a = 0.1;
#pragma unroll
for (size_t i = 0; i < SIZE; i++) {
accum += a;
}
}
}
namespace habitat {
namespace cuda {
namespace diagnostics {
// Launches the FLOP micro-benchmark kernel with the given configuration.
// The launch is asynchronous; the caller is responsible for synchronizing
// and timing around it.
void run_flop_test(size_t num_blocks, size_t threads_per_block) {
flop_test<<<num_blocks, threads_per_block>>>();
}
}
}
}
|
8,282 | #include "includes.h"
#define SIZ 20
#define num_inp 4
using namespace std;
// Plain pair of vertex indices; exposed under the typedef name `edges`.
typedef struct edge {
int first, second; // endpoint indices of the edge
} edges;
// Seeds the softmax gradient: for each sample i (one per blockIdx.x),
// subtracts 1 from the score at the sample's true-class column y[i].
// `size` is the row stride (number of classes).
// NOTE(review): every thread of a block executes the same subtraction —
// launch with one thread per block, otherwise the decrement is repeated
// (and races). TODO confirm the launch configuration.
__global__ void dscores_kernel_init(int * y, double * dscores, int size)
{
int i = blockIdx.x;
dscores[i*size + y[i]] -= 1;
} |
8,283 | #include <stdio.h>
#include <cuda_runtime.h>
// Reports free/total device memory after allocating ~160 MB of doubles.
int main()
{
    size_t m_free, m_total;
    double* array;
    cudaMemGetInfo(&m_free, &m_total);
    cudaMalloc((void **)&array, 20*1024*1024*sizeof(double));
    cudaMemGetInfo(&m_free, &m_total);
    // FIX: size_t must be printed with %zu; %ld has the wrong width on
    // LLP64 platforms (e.g. 64-bit Windows).
    printf("Livre: %zu, Total %zu\n", m_free, m_total);
    // FIX: release the allocation instead of leaking it.
    cudaFree(array);
    return 0;
}
|
8,284 | #include <iterator>
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <stdint.h>
#define BLOCK_SIZE 32
#define SECTION_SIZE (BLOCK_SIZE*2)
#define cudaCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
std::cerr << "Failed to run stmt " #stmt \
<< " on line " << __LINE__ \
<< ": " << cudaGetErrorString(err) << "\n"; \
exit(-1); \
} \
} while(0)
// Applies op(element, *val) in place to the first `len` elements of `input`.
// `val` is a DEVICE pointer (typically one entry of a scanned block-sums
// array, added uniformly to a whole section).
template < typename T, typename Operator >
__global__ void apply( T* input, int len, Operator op, const T* val )
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx >= len )
        return;
    input[idx] = op( input[idx], *val );
}
/**
 * Block-local inclusive scan (Brent–Kung up-sweep/down-sweep) over
 * SECTION_SIZE elements per block; each of the BLOCK_SIZE threads handles
 * two elements. When `sums` is non-null, the last thread of each block
 * writes the block total to sums[blockIdx.x].
 * Out-of-range slots are zero-filled, which assumes T{0} is an identity for
 * `op` (true for the Adder used here; same assumption as the original).
 *
 * FIX: the original executed __syncthreads() inside `if (i < len)` — a
 * divergent barrier with undefined behaviour whenever a block straddles the
 * end of the data — and performed the first up-sweep combine without any
 * barrier after the shared-memory loads. Loads and stores are now guarded
 * individually and every barrier is reached by all threads of the block.
 */
template < typename T, typename Operator >
__global__ void scan(T* input, T* output, int len, T* sums, Operator op )
{
    __shared__ T tmp[ SECTION_SIZE ];
    int t = threadIdx.x;
    int i = blockIdx.x * SECTION_SIZE + t;
    // Zero-fill so the tail block's missing elements act as identity.
    tmp[ t ] = .0;
    tmp[ t+blockDim.x ] = .0;
    if( i < len )
        tmp[t] = input[i];
    if( i+blockDim.x < len )
        tmp[t+blockDim.x] = input[i+blockDim.x];
    __syncthreads();
    // Up-sweep: build partial sums at power-of-two segment ends.
    for( int stride = 1; stride <= BLOCK_SIZE; stride *= 2 )
    {
        int index = (t+1)*stride*2-1;
        if( index < SECTION_SIZE )
            tmp[index] = op( tmp[index], tmp[index-stride] );
        __syncthreads();
    }
    // Down-sweep: propagate the partials to the remaining positions.
    for( int stride = BLOCK_SIZE/2; stride > 0; stride /= 2 )
    {
        __syncthreads();
        int index = (t+1)*stride*2-1;
        if( index+stride < SECTION_SIZE )
            tmp[index+stride] = op( tmp[index+stride], tmp[index] );
    }
    __syncthreads();
    // Guarded writes back to global memory.
    if( i < len )
    {
        output[i] = tmp[t];
        if( i+blockDim.x < len )
            output[i+blockDim.x] = tmp[t+blockDim.x];
    }
    if( t == blockDim.x - 1 && sums )
        sums[blockIdx.x] = tmp[SECTION_SIZE-1];
}
/**
 * Recursive host driver for a full-array inclusive scan.
 * @input and @output must already be in device memory.
 * Scans each SECTION_SIZE block, recursively scans the per-block sums, then
 * adds scannedSums[i-1] uniformly to block i via the `apply` kernel.
 * NOTE(review): when numBlocks is an exact multiple of 0x400 the last
 * iteration launches with remainder == 0 (an invalid zero-sized grid), and
 * iterations > 1 never offset input/output, so arrays needing more than
 * 0x400 blocks rescan the same region each pass. TODO confirm limits.
 */
template < typename T, typename Operator >
void scanner( T* input, T* output, int numElements, Operator op )
{
int numBlocks( ::ceil(static_cast<double>(numElements)/SECTION_SIZE) );
std::cout << "scanner: numBlocks: " << numBlocks << "\n";
int iterations = ::ceil(static_cast<double>(numBlocks)/0x400);
int remainder = numBlocks%0x400;
for( int itr = 0; itr < iterations; ++itr )
{
// Per-block totals, scanned recursively below.
T* sums;
cudaCheck(cudaMalloc( (void**)&sums, numBlocks*sizeof(T) ));
dim3 dimGrid( itr==iterations-1? remainder : numBlocks );
scan<<< dimGrid, BLOCK_SIZE >>>( input, output, numElements, sums, op );
cudaCheck( cudaGetLastError() );
cudaDeviceSynchronize();
#ifdef DEBUG
{
std::vector<T> s( numElements );
cudaCheck(cudaMemcpy(&s[0],output,numElements*sizeof(T), cudaMemcpyDeviceToHost ));
std::cout << " ============= Temporary output =============\n";
std::copy( s.begin(), s.end()
, std::ostream_iterator<T>(std::cout," ") );
std::cout << "\n ============================================\n";
}
#endif
if( numElements > SECTION_SIZE )
{
#ifdef DEBUG
{
std::vector<T> s( numBlocks );
cudaCheck(cudaMemcpy( &s[0], sums, numBlocks*sizeof(T), cudaMemcpyDeviceToHost ));
std::cout << " =========== Sums pre scan ============= \n";
std::copy( s.begin(), s.end()
, std::ostream_iterator<T>(std::cout," ") );
std::cout << "\n ============================================\n";
}
#endif
T* scannedSums;
cudaCheck( cudaMalloc( (void**)&scannedSums, numBlocks*sizeof(T) ) );
cudaMemset( scannedSums, 0, numBlocks*sizeof(T) );
// Recursive scan of the block totals.
scanner( sums, scannedSums, numBlocks, op );
#ifdef DEBUG
std::vector<T> s( numBlocks );
cudaCheck(cudaMemcpy( &s[0], scannedSums, numBlocks*sizeof(T), cudaMemcpyDeviceToHost ));
std::cout << " =========== Sums post scan ============= \n";
std::copy( s.begin(), s.end()
, std::ostream_iterator<T>(std::cout," ") );
std::cout << "\n ============================================\n";
#endif
// Add block i's predecessor sum uniformly to the whole of block i.
int remaining = numElements - SECTION_SIZE;
for( int i = 1; i < numBlocks; ++i )
{
dim3 dGrid( ceil((double)SECTION_SIZE/BLOCK_SIZE) );
dim3 dBlock( BLOCK_SIZE );
apply<<< dGrid, dBlock >>>
( output+(SECTION_SIZE*i)
, remaining < SECTION_SIZE ? remaining : SECTION_SIZE
, op
, scannedSums+i-1 );
cudaCheck( cudaGetLastError() );
cudaDeviceSynchronize();
remaining -= SECTION_SIZE;
}
cudaCheck( cudaFree(scannedSums) );
}
cudaCheck( cudaFree(sums) );
}
}
// Binary addition functor usable from device code (the scan's operator).
struct Adder
{
template < typename T >
__device__ T operator()( T lhs, T rhs ) const { return lhs + rhs; }
};
// Reads whitespace-separated values of type T from `filename` until EOF or a
// parse failure; returns an empty vector when the file cannot be opened.
template < typename T >
std::vector<T> importVector( const std::string& filename )
{
    std::ifstream ifs( filename.c_str() );
    std::vector< T > values;
    if( !ifs )
        return values;
    for( std::istream_iterator<T> it( ifs ), end; it != end; ++it )
        values.push_back( *it );
    return values;
}
// Loads uint64 values from the file named by argv[1], scans them on the GPU,
// and prints the result (all elements when short, else the last element).
// NOTE(review): argv[1] is used unchecked (crash when no argument is given),
// and an empty input makes &hostInput[0] / hostOutput.back() undefined.
int main(int argc, char ** argv)
{
uint64_t* deviceInput;
uint64_t* deviceOutput;
int numElements; // number of elements in the list
std::vector<uint64_t> hostOutput;
std::vector<uint64_t> hostInput(importVector<uint64_t>(argv[1]));
numElements = (int)hostInput.size();
std::cout << "numElements: " << numElements << std::endl;
hostOutput.resize( numElements );
cudaCheck(cudaMalloc((void**)&deviceInput
, numElements*sizeof(uint64_t)));
cudaCheck(cudaMalloc((void**)&deviceOutput
, numElements*sizeof(uint64_t)));
cudaCheck(cudaMemset(deviceOutput, 0
, numElements*sizeof(uint64_t)));
cudaCheck(cudaMemcpy(deviceInput, &(hostInput[0])
, numElements*sizeof(uint64_t), cudaMemcpyHostToDevice));
std::cout << "Launching scanner.\n";
scanner( deviceInput, deviceOutput, numElements, Adder() );
cudaCheck(cudaMemcpy(&(hostOutput[0]), deviceOutput
, numElements*sizeof(uint64_t), cudaMemcpyDeviceToHost));
if( numElements < 100 )
std::copy( hostOutput.begin(), hostOutput.end()
, std::ostream_iterator<uint64_t>(std::cout,"\n") );
else std::cout << "Last element: " << hostOutput.back() << "\n";
cudaFree(deviceInput);
cudaFree(deviceOutput);
return 0;
}
|
8,285 | #include "includes.h"
// Element-wise y[i] = x[i] + y[i] for i in [0, n). Designed for a
// single-block launch: each thread starts at threadIdx.x and strides by
// blockDim.x, so one block covers any n (blockIdx is intentionally unused).
__global__ void addMoreThreads(int n, float *x, float *y)
{
// Let the kernel calculate which part of the input signal to play with
int index = threadIdx.x;
int stride = blockDim.x;
// Just did this to keep the syntax similar to the previous example
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
} |
8,286 | // ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// The program uses each threads to sum numbers in parallel
// If you test a serial program for adding large arrays, you'll
// you'll notice that GPU accelerated summation is significantly faster
// although copying data to device memory takes time
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <stdio.h>
#include <iostream>
using namespace std;
#define N 10
// Element-wise c = a + b for the first N elements. One thread per element,
// single-block launch (only threadIdx.x is used).
__global__ void sum(int *a, int *b, int *c)
{
    const int tid = threadIdx.x; // handle the data at this index
    if (tid >= N)
        return;
    c[tid] = a[tid] + b[tid];
}
// Host driver: fills two N-element arrays (i and i*i), adds them on the GPU,
// prints every result line, and releases the device buffers.
int main ( void )
{
    cout << "------------ initialising device and host arrays" << endl;
    int a[N], b[N], c[N];        // host-side buffers
    int *dev_a, *dev_b, *dev_c;  // device-side buffers
    cout << "------------ initialise arrays" << endl;
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * i;
    }
    cout << "------------ allocate device memory" << endl;
    cudaMalloc( (void**)&dev_a, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c, N * sizeof(int) );
    cout << "------------ copy a and b to dev_a and dev_b" << endl;
    cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    cout << "------------ calling kernel" << endl;
    sum<<<1,N>>>(dev_a, dev_b, dev_c);
    cout << "------------ copy results back to host" << endl;
    cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );
    cout << "------------ printing results" << endl;
    for (int i = 0; i < N; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    // ---- FREE ALLOCATED KERNEL MEMORY
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    return 0;
}
|
8,287 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
# include <string.h>
#include <curand.h>
#include <cuda_runtime.h>
// Demo kernel: each of the first `x` threads writes the constant 3 into d_B.
// The original rule-184-style traffic-automaton update is retained below,
// commented out. Single-block launch; only threadIdx.x is used.
__global__ void mover_autos(int *d_A, int *d_B, int x)
{
/* int a=0;
int b=0;
int x1=0;
int y1=0;
int u=0;*/
int i = threadIdx.x;
if (i < x)
{
d_B[i] = 3;
/* a=d_A[i];
b=d_A[i+1];
if (a==0 && b==0){x1=0;y1=0;u=0;}
else if(a==0 && b==1){x1=0;y1=1;u=0;}
else if(a==1 && b==0){x1=0;y1=1;u=1;}
else if(a==1 && b==1){x1=1;y1=1;u=0;}
if (u==1)
{
d_B[i] = x1;
d_B[i+1] = y1;
i++;
}
else{
d_B[i]=x1;
d_B[i+1]=y1;
}*/
}
/* int a1,b1;
a=d_A[x-1];
b=d_A[0];
if (a==0 && b==0){a1=0;b1=0;u=0;}
else if(a==0 && b==1){a1=0;b1=1;u=0;}
else if(a==1 && b==0){a1=0;b1=1;u=1;}
else if(a==1 && b==1){a1=1;b1=1;u=0;}
if(u==1)
{
d_B[x-1]=a1;
d_B[0]=b1; //store the results in the result buffer
}*/
}
int main(int argc, char *argv[])
{
    clock_t start_t, end_t;
    double duration;
    srand (time(NULL));

    // BUG FIX: guard the two required arguments before reading argv[1]/argv[2].
    if (argc < 3) {
        printf("uso: %s <celdas> <iteraciones>\n", argv[0]);
        return 1;
    }
    int x = atoi(argv[1]);              // number of cells
    int cantidadinter = atoi(argv[2]);  // number of iterations

    size_t nBytes = x * sizeof(int);
    int* h_A = (int*)malloc(nBytes);
    int* h_B = (int*)malloc(nBytes);
    // device vectors in GPU memory
    int *d_A, *d_B;

    for (int i = 0; i < x; i++)
    {
        h_A[i] = 1;
        h_B[i] = 0;   // BUG FIX: h_B was copied to the device uninitialized
    }
    printf("\n");

    cudaMalloc((int **)&d_A, nBytes);
    cudaMalloc((int **)&d_B, nBytes);
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);

    // BUG FIX: the original also printed d_A[i], dereferencing a device
    // pointer on the host (undefined behavior); print host data only.
    for (int i = 0; i < x; ++i)
    {
        printf("%d\n", h_A[i]);
    }

    start_t = clock();
    // BUG FIX: launch one thread per cell instead of the hard-coded
    // <<<1, 4>>> with x = 4 (assumes x <= 1024, the per-block thread limit).
    for (int k = 1; k < cantidadinter; k++)
    {
        mover_autos <<<1, x>>> (d_A, d_B, x);
    }
    cudaMemcpy(h_A, d_A, nBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_B, nBytes, cudaMemcpyDeviceToHost);
    end_t = clock();

    duration = (double)(end_t - start_t) / CLOCKS_PER_SEC;
    printf("\nEL tiempo fue: %f",duration);

    cudaFree(d_A);
    cudaFree(d_B);
    free (h_A);
    free(h_B);
    return cudaDeviceReset();
} |
8,288 | #include "includes.h"
#define tileSize 32
//function for data initialization
void initialization( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the input data
void printInput( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the output data
void printOutput( double *P_C, double *P_G, int arow, int bcol);
//GPU kernels
// Tiled matrix multiply: P[heightA x widthB] = M[heightA x widthAHeightB]
// * N[widthAHeightB x widthB], all row-major.  Launch with tileSize x
// tileSize thread blocks; each block computes one output tile, staging
// operand tiles through shared memory.
// BUG FIX: a stray duplicate `__global__` qualifier preceded this
// definition (`__global__ __global__ void ...`), which does not compile.
__global__ void matrixMultiplication(double* M, double* N, double* P, int widthAHeightB, int heightA, int widthB) {
    // First and last linear index of this block's row band in M, plus the
    // per-tile strides through M (columns) and N (rows).
    int Mstart = widthAHeightB * tileSize * blockIdx.y;
    int Mend = Mstart + widthAHeightB - 1;
    int mstep = tileSize;
    int Nstart = tileSize * blockIdx.x;
    int nstep = tileSize * widthB;
    double temp = 0;   // this thread's accumulated output element
    __shared__ double Ms[tileSize][tileSize];
    __shared__ double Ns[tileSize][tileSize];
    // Fast path: every tile of this block lies fully inside both operands.
    if (Mstart < (heightA/tileSize)*tileSize*widthAHeightB && Nstart%widthB < (widthB/tileSize)*tileSize) {
        for (int m = Mstart, n = Nstart; m < Mend; m += mstep, n += nstep) {
            Ms[threadIdx.y][threadIdx.x] = M[m + widthAHeightB*threadIdx.y + threadIdx.x];
            Ns[threadIdx.y][threadIdx.x] = N[n + widthB*threadIdx.y + threadIdx.x];
            __syncthreads();   // tiles fully loaded before use
            for (int i = 0; i < tileSize; ++i) {
                temp += Ms[threadIdx.y][i] * Ns[i][threadIdx.x];
            }
            __syncthreads();   // all reads done before tiles are overwritten
        }
    } else { // boundary tiles: zero-pad loads that fall outside the matrices
        for (int m = Mstart, n = Nstart; m <= Mend; m += mstep, n += nstep) {
            if (m%widthAHeightB + threadIdx.x < widthAHeightB && blockIdx.y*tileSize + threadIdx.y < heightA) {
                Ms[threadIdx.y][threadIdx.x] = M[m + widthAHeightB*threadIdx.y + threadIdx.x];
            }
            else {
                Ms[threadIdx.y][threadIdx.x] = 0.0;
            }
            if ((n/widthB) + threadIdx.y < widthAHeightB && blockIdx.x*tileSize + threadIdx.x < widthB) {
                Ns[threadIdx.y][threadIdx.x] = N[n + widthB*threadIdx.y + threadIdx.x];
            }
            else {
                Ns[threadIdx.y][threadIdx.x] = 0.0;
            }
            __syncthreads();
            for (int i = 0; i < tileSize; ++i) {
                temp += Ms[threadIdx.y][i] * Ns[i][threadIdx.x];
            }
            __syncthreads();
        }
    }
    // Guarded store of this thread's element of P.
    if (blockIdx.y*tileSize + threadIdx.y < heightA && blockIdx.x*tileSize + threadIdx.x < widthB) {
        P[widthB * tileSize * blockIdx.y + tileSize * blockIdx.x + widthB * threadIdx.y + threadIdx.x] = temp;
    }
} |
8,289 | #include "includes.h"
// Builds partition boundaries from a sorted key array: wherever the key
// changes between positions idx and idx+1, records idx as the start index
// stored at partIndices[value + 1].
// Global thread 0 maps to idx = 1, so position 0 is never examined here —
// presumably the caller seeds partIndices[0]; verify against the caller.
// NOTE(review): when idx == size - 1 this reads array[size]; assumes the
// caller allocates array with at least size + 1 elements — TODO confirm.
__global__ void findPartIndicesNegStartKernel(int size, int *array, int *partIndices)
{
    // global thread id, shifted by one so each thread compares idx with idx+1
    int idx = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if(idx < size)
    {
        int value = array[idx];
        int nextValue = array[idx + 1];
        // a change in key value marks the start of the next partition
        if(value != nextValue)
            partIndices[value + 1] = idx;
    }
}
} |
8,290 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <string.h>
#define R_size 129
#define k 1024
#define n_size 128
__global__ void get_square(unsigned char a[], unsigned int accumulator[], unsigned int n);
void square(unsigned char *a, unsigned char *c, unsigned int size);
__global__ void get_products(unsigned char a[], unsigned char b[], unsigned int accumulator[], unsigned int n);
void barrett_reduction(unsigned char *buf_cpu, unsigned char *buf_gpu, unsigned char *gpu_r, unsigned char *gpu_n, unsigned char *reduction, unsigned char *temp_cpu, unsigned char *temp_gpu, unsigned char *shifted_cpu, unsigned char *shifted_gpu, unsigned char *xprime_cpu, unsigned char *xprime_gpu, unsigned char *result, unsigned char *tmp, unsigned int exp_size, unsigned int *kernel_buf, unsigned int *transfer, unsigned char *n);
void multiplication(unsigned char *a, unsigned char *b, unsigned char *c, unsigned int size);
char checkbit(unsigned char *exponent, unsigned int index_of_bit);
void subtraction(unsigned char *a, unsigned char *b, unsigned char *c, unsigned int size);
void bit_shift(unsigned char *a, unsigned char *b, unsigned int shift, unsigned int size_of_a);
void exponentiation(unsigned char *gpu_message, unsigned char *exponent, unsigned char *gpu_ciphertext, unsigned char *m0_copy, unsigned char *reduction, unsigned char *buf_cpu, unsigned char *buf_gpu, unsigned char *temp_cpu, unsigned char *temp_gpu, unsigned char *shifted_cpu, unsigned char *shifted_gpu, unsigned char *xprime_cpu, unsigned char *xprime_gpu, unsigned char *result, unsigned char *tmp, unsigned int exponent_size, unsigned char *gpu_r, unsigned char *gpu_n, unsigned int *kernel_buf, unsigned int *transfer, unsigned char *n);
int main(int argc, char *argv[]) {
    // RSA demo: encrypt the message "hello" with the public exponent
    // e = 0x10001, then decrypt with the private exponent d, running both
    // modular exponentiations through the GPU multiply kernels and timing
    // them with CUDA events.  All big numbers are little-endian byte arrays
    // (index 0 = least significant byte); n_size (128) bytes = 1024 bits.

    // 1024-bit modulus n.  The host buffer is oversized to 5*n_size
    // (matching the original allocation) so it can serve as padded scratch.
    static const unsigned char n_bytes[n_size] = {
        0xcf, 0x82, 0x69, 0x57, 0x4d, 0xe7, 0x82, 0x1a, 0xe4, 0x20, 0x14, 0x47, 0x39, 0x52, 0x55, 0x28,
        0xed, 0x3f, 0xa4, 0x61, 0xd3, 0xf4, 0xf2, 0x34, 0x6a, 0x54, 0xd1, 0x15, 0x7d, 0x67, 0x0b, 0xc7,
        0x8c, 0xfe, 0x1b, 0x68, 0x44, 0x07, 0x26, 0x99, 0x0b, 0x4d, 0xc7, 0x3f, 0x52, 0x90, 0x02, 0x68,
        0x3d, 0x83, 0x1d, 0x79, 0x7a, 0x3f, 0x36, 0xf3, 0x41, 0x8b, 0x7c, 0xdf, 0x64, 0xac, 0x74, 0x7c,
        0x08, 0xdb, 0xa0, 0x6f, 0x10, 0x71, 0x13, 0x86, 0xaf, 0xb8, 0x71, 0xf8, 0xf0, 0x45, 0xa7, 0x94,
        0xb3, 0x6b, 0x1e, 0xff, 0x8e, 0x13, 0xae, 0xc2, 0x59, 0x56, 0xd3, 0x0d, 0x20, 0x62, 0x21, 0x30,
        0x1d, 0x6b, 0x5e, 0x0c, 0x00, 0x35, 0xae, 0xbd, 0xa5, 0xc2, 0x25, 0x98, 0xe7, 0x57, 0x89, 0x0c,
        0x12, 0xf9, 0x33, 0x3d, 0x0a, 0xac, 0x51, 0xd8, 0x5c, 0x40, 0x9b, 0xfa, 0xf9, 0xbc, 0x03, 0xe6
    };
    unsigned char *n = (unsigned char *) calloc((4*n_size + n_size), sizeof(char));
    memcpy(n, n_bytes, n_size);
    unsigned char *gpu_n;
    cudaMalloc(&gpu_n, (4*n_size + n_size));
    cudaMemcpy(gpu_n, n, (4*n_size + n_size), cudaMemcpyHostToDevice);

    // Barrett constant r = floor(4^k / n); R_size (129) significant bytes.
    static const unsigned char r_bytes[R_size] = {
        0x7f, 0x9d, 0xe9, 0x40, 0x57, 0x02, 0x6e, 0x93, 0x2b, 0xb4, 0xe3, 0xfd, 0xba, 0x0c, 0xcd, 0x78,
        0x7d, 0xae, 0x8d, 0x80, 0xff, 0x66, 0x33, 0x0b, 0x28, 0x4c, 0x93, 0x30, 0x02, 0x92, 0xa0, 0x7c,
        0xf1, 0x0c, 0x0a, 0x5e, 0xf2, 0x9a, 0x8f, 0x17, 0x4c, 0x82, 0x25, 0xe5, 0x98, 0x45, 0x4d, 0xc7,
        0xd9, 0x53, 0x5e, 0x5a, 0x6e, 0x37, 0x43, 0x29, 0x88, 0xcb, 0xe9, 0x31, 0x2f, 0xd7, 0x06, 0xfb,
        0xf1, 0x38, 0xdf, 0xc4, 0xda, 0x7c, 0x09, 0x5c, 0xf9, 0x2b, 0x81, 0x30, 0xe9, 0x29, 0xcd, 0x45,
        0xee, 0xff, 0x5b, 0x3c, 0x23, 0x6d, 0xb9, 0xa1, 0x89, 0x3f, 0xc3, 0x9e, 0xa1, 0x30, 0x98, 0xf8,
        0xc8, 0x4a, 0xbe, 0xc6, 0x49, 0xf7, 0xb3, 0xff, 0x09, 0x3b, 0x94, 0x9d, 0x2f, 0x5c, 0x68, 0xe1,
        0x06, 0xf1, 0x33, 0xeb, 0xc5, 0x88, 0xa5, 0x1c, 0xde, 0x2c, 0x64, 0xad, 0x5c, 0xc9, 0xeb, 0x1c,
        0x01
    };
    unsigned char *r = (unsigned char *) calloc(2*n_size, sizeof(char));
    memcpy(r, r_bytes, R_size);
    unsigned char *gpu_r;
    cudaMalloc(&gpu_r, (2*n_size));
    cudaMemcpy(gpu_r, r, 2*n_size, cudaMemcpyHostToDevice);

    // Plaintext, zero-padded to n_size bytes.
    unsigned char *message = (unsigned char *) calloc(n_size, sizeof(char));
    memcpy(message, "hello", 5);
    unsigned char *gpu_message;
    cudaMalloc(&gpu_message, n_size);
    cudaMemcpy(gpu_message, message, n_size, cudaMemcpyHostToDevice);

    // Public exponent e = 0x010001 (65537), little-endian.
    unsigned char *exponent = (unsigned char *) malloc(3);
    exponent[0] = 0x01;
    exponent[1] = 0x00;
    exponent[2] = 0x01;
    unsigned int exponent_size = 3;

    // Scratch buffers shared by exponentiation()/barrett_reduction().
    unsigned char *cpu_ciphertext = (unsigned char *) calloc(n_size, sizeof(char));
    unsigned char *gpu_ciphertext;
    cudaMalloc(&gpu_ciphertext, n_size);
    cudaMemset(gpu_ciphertext, 0x00, n_size);
    unsigned char *m0_copy;                      // device copy of the base m0
    cudaMalloc(&m0_copy, n_size);
    unsigned char *reduction = (unsigned char *) calloc(n_size, sizeof(char));
    unsigned char *buf_cpu = (unsigned char *) calloc((n_size * 2) + 1, sizeof(char));
    unsigned char *buf_gpu;
    cudaMalloc(&buf_gpu, ((n_size * 2) + 1));
    cudaMemset(buf_gpu, 0x00, (n_size * 2) + 1);
    unsigned char *temp_cpu = (unsigned char *) calloc(3*n_size, sizeof(char));
    unsigned char *temp_gpu;
    cudaMalloc(&temp_gpu, (3*n_size));
    unsigned char *shifted_cpu = (unsigned char *) calloc(n_size, sizeof(char));
    unsigned char *shifted_gpu;
    cudaMalloc(&shifted_gpu, n_size);
    cudaMemset(shifted_gpu, 0x00, n_size);
    unsigned char *xprime_cpu = (unsigned char *) calloc(2*n_size, sizeof(char));
    unsigned char *xprime_gpu;
    cudaMalloc(&xprime_gpu, (2*n_size));
    cudaMemset(xprime_gpu, 0x00, 2*n_size);
    unsigned char *result = (unsigned char *) calloc(n_size + 1, sizeof(char));
    unsigned char *tmp = (unsigned char *) calloc(n_size + 1, sizeof(char));
    unsigned int *transfer = (unsigned int *) calloc(4*n_size, sizeof(int));
    unsigned int *kernel_buf;
    cudaMalloc(&kernel_buf, 4*n_size*sizeof(int));
    cudaMemset(kernel_buf, 0x00, 4*n_size*sizeof(int));

    // Time encrypt + decrypt with CUDA events.
    cudaError_t error;
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if(error != cudaSuccess)
        printf("error\n");
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if(error != cudaSuccess)
        printf("error\n");
    error = cudaEventRecord(start, NULL);

    // ciphertext = message^e mod n
    exponentiation(gpu_message, exponent, gpu_ciphertext, m0_copy, reduction, buf_cpu, buf_gpu, temp_cpu, temp_gpu, shifted_cpu, shifted_gpu, xprime_cpu, xprime_gpu, result, tmp, exponent_size, gpu_r, gpu_n, kernel_buf, transfer, n);

    // Private exponent d (128 bytes, little-endian).
    unsigned int d_exponent_size = 128;
    unsigned char *d_exponent = (unsigned char *) malloc(128);
    static const unsigned char d_bytes[128] = {
        0x91, 0x0a, 0xb3, 0x66, 0xbd, 0x6f, 0x18, 0xde, 0xd5, 0x01, 0x61, 0x36, 0x95, 0x6d, 0xdd, 0x33,
        0xdb, 0x26, 0x03, 0x0e, 0x68, 0x54, 0x73, 0xa0, 0xe0, 0x6e, 0x70, 0x74, 0x25, 0x8b, 0x2b, 0xfb,
        0x9e, 0x3c, 0x34, 0x2e, 0x45, 0x10, 0x10, 0x6c, 0xfb, 0xb7, 0x9b, 0xc8, 0xcf, 0x71, 0xd9, 0x96,
        0xb7, 0xbb, 0x5f, 0x19, 0x76, 0x36, 0x49, 0x6a, 0xb3, 0x83, 0xc3, 0x59, 0x2e, 0x62, 0x87, 0xa2,
        0x5a, 0x2f, 0x60, 0x75, 0x01, 0xf0, 0x3f, 0xdb, 0x5a, 0x70, 0x1f, 0x44, 0x6a, 0x9c, 0x77, 0x63,
        0xba, 0xcb, 0xcd, 0x1f, 0x99, 0x70, 0x89, 0x94, 0x31, 0x02, 0x0a, 0x32, 0x96, 0x65, 0x21, 0x21,
        0x59, 0x55, 0x8a, 0xd0, 0x7a, 0x1c, 0xd2, 0x66, 0x48, 0x95, 0x08, 0xd3, 0x6b, 0xe7, 0x9c, 0xb9,
        0x96, 0x20, 0x20, 0x8a, 0xe5, 0x4d, 0x3e, 0x53, 0x4b, 0xd8, 0x21, 0x04, 0x81, 0x7d, 0x29, 0x38
    };
    memcpy(d_exponent, d_bytes, 128);

    // message = ciphertext^d mod n (decrypt back into the original buffers)
    memset(message, 0x00, n_size);
    cudaMemset(gpu_message, 0x00, n_size);
    exponentiation(gpu_ciphertext, d_exponent, gpu_message, m0_copy, reduction, buf_cpu, buf_gpu, temp_cpu, temp_gpu, shifted_cpu, shifted_gpu, xprime_cpu, xprime_gpu, result, tmp, d_exponent_size, gpu_r, gpu_n, kernel_buf, transfer, n);

    error = cudaEventRecord(stop, NULL);
    error = cudaEventSynchronize(stop);
    if(error != cudaSuccess)
        printf("error\n");
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU time: %.6f\n", msecTotal / 1000);

    // Round-trip check: print the decrypted plaintext bytes.
    cudaMemcpy(message, gpu_message, n_size, cudaMemcpyDeviceToHost);
    int z = 0;
    while (z < n_size) {
        printf("message[%d] = %x\n", z, message[z]);
        z++;
    }

    // Release host and device resources (the original leaked all of these).
    free(n); free(r); free(message); free(exponent); free(cpu_ciphertext);
    free(reduction); free(buf_cpu); free(temp_cpu); free(shifted_cpu);
    free(xprime_cpu); free(result); free(tmp); free(transfer); free(d_exponent);
    cudaFree(gpu_n); cudaFree(gpu_r); cudaFree(gpu_message); cudaFree(gpu_ciphertext);
    cudaFree(m0_copy); cudaFree(buf_gpu); cudaFree(temp_gpu); cudaFree(shifted_gpu);
    cudaFree(xprime_gpu); cudaFree(kernel_buf);
    cudaEventDestroy(start); cudaEventDestroy(stop);
    return 0;
}
// Binary (square-and-multiply, MSB-first) modular exponentiation:
// gpu_ciphertext = gpu_message ^ exponent mod n, with a Barrett reduction
// after every square and every multiply.  `exponent` and `n` are host
// buffers; every `gpu_*` pointer and `m0_copy`/`kernel_buf` live on the
// device; the remaining `*_cpu`/`result`/`tmp`/`transfer` buffers are
// caller-owned host scratch (see main for their sizes).
void exponentiation(unsigned char *gpu_message, unsigned char *exponent, unsigned char *gpu_ciphertext, unsigned char *m0_copy, unsigned char *reduction, unsigned char *buf_cpu, unsigned char *buf_gpu, unsigned char *temp_cpu, unsigned char *temp_gpu, unsigned char *shifted_cpu, unsigned char *shifted_gpu, unsigned char *xprime_cpu, unsigned char *xprime_gpu, unsigned char *result, unsigned char *tmp, unsigned int exponent_size, unsigned char *gpu_r, unsigned char *gpu_n, unsigned int *kernel_buf, unsigned int *transfer, unsigned char *n) {
    dim3 blocksPerGrid(2);
    dim3 threadsPerBlock(64);   // 2 * 64 = 128 threads: one per operand byte
    // Highest possible (zero-based) bit index for this exponent length.
    unsigned int total_bits = exponent_size * 8 - 1;
    // msb = number of leading zero bits in the top exponent byte.
    unsigned char mask = 0x80; // 10000000 in binary
    unsigned char msb = 0;
    int i = 0;
    while(i < 8) {
        if((exponent[exponent_size - 1] & (mask >> i)) == (mask >> i)) {
            msb = i;
            break;
        }
        i++;
    }
    // Number of significant exponent bits.
    unsigned int exp_bits = (total_bits - msb);
    // Keep the original base m0 on the device for the multiply steps.
    cudaMemcpy(m0_copy, gpu_message, n_size, cudaMemcpyDeviceToDevice);
    // Scan the exponent from the bit just below the MSB down to bit 0:
    // square on every step, and multiply by m0 when the bit is 1 (the MSB
    // itself is implicit: the accumulator starts as m^1).
    int index_of_bit = exp_bits - 1;
    while (index_of_bit >= 0) {
        // m^2: byte-wise partial products accumulated on the GPU ...
        get_square<<<blocksPerGrid, threadsPerBlock>>>(gpu_message, kernel_buf, n_size);
        cudaMemcpy(transfer, kernel_buf, 2*n_size*sizeof(unsigned int), cudaMemcpyDeviceToHost);
        // ... then carries propagated on the host: keep each low byte and
        // push the overflow into the next position before it is read.
        unsigned int index = 0;
        while(index < 2*n_size) {
            buf_cpu[index] = (unsigned char) transfer[index];
            transfer[index + 1] += (unsigned int) (transfer[index]>>8);
            index++;
        }
        memset(transfer, 0x00, 4*n_size*sizeof(int));
        cudaMemset(kernel_buf, 0x00, 4*n_size*sizeof(int));
        cudaMemcpy(buf_gpu, buf_cpu, 2*n_size, cudaMemcpyHostToDevice);
        // m^2 mod n -> reduction (host buffer), copied back as the new m.
        barrett_reduction(buf_cpu, buf_gpu, gpu_r, gpu_n, reduction, temp_cpu, temp_gpu, shifted_cpu, shifted_gpu, xprime_cpu, xprime_gpu, result, tmp, exponent_size, kernel_buf, transfer, n);
        cudaMemcpy(gpu_message, reduction, n_size, cudaMemcpyHostToDevice);
        cudaMemset(buf_gpu, 0x00, 2*n_size);
        memset(buf_cpu, 0x00, 2*n_size);
        memset(reduction, 0x00, n_size);
        if (checkbit(exponent, index_of_bit) == 1) {
            // bit set: m = (m * m0) mod n, same pipeline as the squaring
            get_products<<<blocksPerGrid, threadsPerBlock>>>(gpu_message, m0_copy, kernel_buf, n_size);
            cudaMemcpy(transfer, kernel_buf, 2*n_size*sizeof(unsigned int), cudaMemcpyDeviceToHost);
            index = 0;
            while(index < 2*n_size) {
                buf_cpu[index] = (unsigned char) transfer[index];
                transfer[index + 1] += (unsigned int) (transfer[index]>>8);
                index++;
            }
            memset(transfer, 0x00, 4*n_size*sizeof(int));
            cudaMemset(kernel_buf, 0x00, 4*n_size*sizeof(int));
            cudaMemcpy(buf_gpu, buf_cpu, 2*n_size, cudaMemcpyHostToDevice);
            barrett_reduction(buf_cpu, buf_gpu, gpu_r, gpu_n, reduction, temp_cpu, temp_gpu, shifted_cpu, shifted_gpu, xprime_cpu, xprime_gpu, result, tmp, exponent_size, kernel_buf, transfer, n);
            cudaMemcpy(gpu_message, reduction, n_size, cudaMemcpyHostToDevice);
            cudaMemset(buf_gpu, 0x00, 2*n_size);
            memset(buf_cpu, 0x00, 2*n_size);
            memset(reduction, 0x00, n_size);
        }
        index_of_bit--;
    }
    // Publish the final residue as the ciphertext.
    // BUG FIX: both pointers are device memory, so the copy kind must be
    // cudaMemcpyDeviceToDevice (the original passed cudaMemcpyDeviceToHost).
    cudaMemcpy(gpu_ciphertext, gpu_message, n_size, cudaMemcpyDeviceToDevice);
    cudaMemset(m0_copy, 0x00, n_size);
    return;
}
// One Barrett reduction: reduction = x mod n, where x (< n^2) arrives in
// buf_cpu/buf_gpu and r = floor(4^k / n) was precomputed by the caller.
// Computes t = x - floor((x*r) / 4^k) * n, then subtracts n once more if t
// still looks out of range.  All scratch buffers are caller-owned and are
// cleared again before returning so the next call starts from zero.
void barrett_reduction(unsigned char *buf_cpu, unsigned char *buf_gpu, unsigned char *gpu_r, unsigned char *gpu_n, unsigned char *reduction, unsigned char *temp_cpu, unsigned char *temp_gpu, unsigned char *shifted_cpu, unsigned char *shifted_gpu, unsigned char *xprime_cpu, unsigned char *xprime_gpu, unsigned char *result, unsigned char *tmp, unsigned int exp_size, unsigned int *kernel_buf, unsigned int *transfer, unsigned char *n) {
    // temp = x * r  (x is up to 2*n_size bytes; 4 * 64 = 256 threads)
    dim3 blocksPerGrid_two(4);
    dim3 threadsPerBlock_two(64);
    get_products<<<blocksPerGrid_two, threadsPerBlock_two>>>(gpu_r, buf_gpu, kernel_buf, 2*n_size);
    cudaMemcpy(transfer, kernel_buf, 3*n_size*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    // Host-side carry propagation: keep each low byte, push the overflow
    // into the next position before it is read.
    unsigned int index = 0;
    while(index < 3*n_size) {
        temp_cpu[index] = (unsigned char) transfer[index];
        transfer[index + 1] += (unsigned int) (transfer[index]>>8);
        index++;
    }
    cudaMemset(kernel_buf, 0x00, 4*n_size*sizeof(int));
    // Strip the high zero bytes of temp so bit_shift sees only the
    // significant length of the product x * r.
    unsigned int zero_bytes = 0;
    int count = (3*n_size) - 1;
    while((count >= 0) && (temp_cpu[count] == 0x00)) {
        count--;
        zero_bytes++;
    }
    // shifted = temp >> 2k  (i.e. the division by 4^k)
    bit_shift(temp_cpu, shifted_cpu, k, (3*n_size) - zero_bytes);
    // xprime = shifted * n  (2 * 64 = 128 threads)
    cudaMemcpy(shifted_gpu, shifted_cpu, n_size, cudaMemcpyHostToDevice);
    dim3 blocksPerGrid_one(2);
    dim3 threadsPerBlock_one(64);
    get_products<<<blocksPerGrid_one, threadsPerBlock_one>>>(shifted_gpu, gpu_n, kernel_buf, n_size);
    cudaMemcpy(transfer, kernel_buf, 2*n_size*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    index = 0;
    while(index < 2*n_size) {
        xprime_cpu[index] = (unsigned char) transfer[index];
        transfer[index + 1] += (unsigned int) (transfer[index]>>8);
        index++;
    }
    cudaMemset(kernel_buf, 0x00, 4*n_size*sizeof(int));
    // t = x - xprime
    subtraction(buf_cpu, xprime_cpu, result, 2*n_size);
    // If t already appears inside the field of n, keep it; otherwise
    // subtract n once more.
    // NOTE(review): only the overflow byte and the top byte are compared
    // here, not the full value — assumed sufficient for these operands.
    if ((result[n_size] == 0x00) && (result[n_size - 1] < n[n_size - 1])) {
        memcpy(reduction, result, n_size);
    }
    else {
        // BUG FIX: use the caller-provided scratch buffer `tmp` instead of
        // calloc'ing (and leaking) a fresh local buffer on every call.
        // subtraction() overwrites all n_size + 1 bytes of it.
        subtraction(result, n, tmp, n_size + 1);
        memcpy(reduction, tmp, n_size);
        memset(tmp, 0x00, n_size + 1);
    }
    // Clear all scratch for the next invocation.
    memset(temp_cpu, 0x00, 3*n_size);
    cudaMemset(temp_gpu, 0x00, 3*n_size);
    memset(shifted_cpu, 0x00, n_size);
    cudaMemset(shifted_gpu, 0x00, n_size);
    memset(xprime_cpu, 0x00, 2*n_size);
    cudaMemset(xprime_gpu, 0x00, 2*n_size);
    memset(result, 0x00, n_size + 1);
    memset(transfer, 0x00, 4*n_size*sizeof(int));
    return;
}
/* Byte-wise big-number subtraction c = a - b over `size` bytes, least
 * significant byte first.  The caller guarantees a >= b (true inside the
 * Barrett reduction), so no final borrow escapes.
 * NOTE: `a` is modified in place whenever an outstanding borrow is applied
 * to one of its bytes. */
void subtraction(unsigned char *a, unsigned char *b, unsigned char *c, unsigned int size) {
    unsigned char pending = 0x00;   /* 1 while a borrow is outstanding */
    for (unsigned int idx = 0; idx < size; ++idx) {
        if (a[idx] == 0 && pending == 1) {
            /* 0x00 minus an outstanding borrow: borrow again from the next
             * byte; 0x100 - 1 - b = 0xff - b. */
            c[idx] = 0xff - b[idx];
            pending = 0x01;
            continue;
        }
        /* settle the outstanding borrow against this byte of a */
        a[idx] = a[idx] - pending;
        if (a[idx] >= b[idx]) {
            /* plain subtraction, no new borrow */
            c[idx] = a[idx] - b[idx];
            pending = 0x00;
        } else {
            /* borrow 0x100 from the next byte; result lands in 0x01..0xff */
            c[idx] = 0x100 + a[idx] - b[idx];
            pending = 0x01;
        }
    }
}
/* Returns the value (0 or 1) of bit `index_of_bit` of the little-endian
 * big number `exponent` (bit 0 is the LSB of exponent[0]). */
char checkbit(unsigned char *exponent, unsigned int index_of_bit) {
    /* which byte holds the bit, and where inside that byte it sits */
    unsigned int byteIdx = index_of_bit >> 0x03;  /* index_of_bit / 8 */
    unsigned int bitPos  = index_of_bit & (0x07); /* index_of_bit % 8 */
    /* shift the byte down and mask off everything but the selected bit */
    return (char)((exponent[byteIdx] >> bitPos) & 0x01);
}
/* b = a >> (2 * k_val) bits, over little-endian byte arrays.
 * b is expected to be completely zero before the shift; `size_of_a` is the
 * number of significant bytes of a.  The division by 4^k in the Barrett
 * reduction is exactly this shift by 2*k bits.
 * BUG FIX: the original unconditionally read a[quotient + j + 1], which on
 * the final iteration reads one byte past size_of_a; that read is now
 * guarded (treated as zero), matching the old result whenever the old read
 * happened to land on zeroed padding. */
void bit_shift(unsigned char *a, unsigned char *b, unsigned int k_val, unsigned int size_of_a) {
    unsigned int shift = k_val * 2;        /* 4^k == 2^(2k) */
    /* whole bytes dropped by the shift */
    unsigned int quotient = shift >> 0x03; /* shift / 8 */
    /* shift longer than the value: b stays all zero */
    if (quotient > size_of_a) {
        return;
    }
    /* residual bit shift inside the last partial byte, 0..7 */
    unsigned int remainder = shift & 0x07; /* shift % 8 */
    unsigned int constant = (size_of_a - quotient);
    for (unsigned int j = 0; j < constant; ++j) {
        b[j] = a[quotient + j] >> remainder;
        /* pull the low bits of the next byte into the high bits of b[j];
         * skip when the next byte would be past the end of a */
        if (quotient + j + 1 < size_of_a) {
            b[j] = b[j] | (unsigned char)(a[quotient + j + 1] << (8 - remainder));
        }
    }
    return;
}
// Schoolbook big-number multiply step: each thread owns byte position
// `tid` of b and accumulates the 16-bit partial products a[j] * b[tid]
// into the shared accumulator — low byte at [j + tid], carry byte at
// [j + tid + 1] — using atomics (many threads hit the same slots).
// Launch must supply one thread per byte of b; accumulator must hold at
// least 2*n entries and start zeroed.
__global__ void get_products(unsigned char a[], unsigned char b[], unsigned int accumulator[], unsigned int n) {
    unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    for (unsigned int j = 0; j < n; j++) {
        unsigned int partial = (unsigned int) a[j] * b[tid];
        atomicAdd(&accumulator[j + tid], partial & 0xFFu);   // low byte
        atomicAdd(&accumulator[j + tid + 1], partial >> 8);  // carry byte
    }
    return;
}
// Squaring variant of get_products: accumulates the partial products
// a[j] * a[tid] for this thread's byte position `tid` into the shared
// accumulator (low byte and carry byte) with atomics.  Launch must supply
// one thread per byte of a; accumulator must hold at least 2*n entries
// and start zeroed.
__global__ void get_square(unsigned char a[], unsigned int accumulator[], unsigned int n) {
    unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    for (unsigned int j = 0; j < n; j++) {
        unsigned int partial = (unsigned int) a[j] * a[tid];
        atomicAdd(&accumulator[j + tid], partial & 0xFFu);   // low byte
        atomicAdd(&accumulator[j + tid + 1], partial >> 8);  // carry byte
    }
    return;
}
|
8,291 | /* objective
* C = A*B // A[m][k], B[k][n], C[m][n]
* compile: nvcc --gpu-architecture=compute_60 --gpu-code=sm_60 -O3 matmul_double.cu -o matmul_double
*/
#include <iostream>
#include <cstdlib>
#define EC(ans) { chkerr((ans), __FILE__, __LINE__); }
// Abort with file/line context if a CUDA API call failed (used via EC()).
inline void chkerr(cudaError_t code, const char *file, int line)
{
    if (code == cudaSuccess)
        return;
    std::cerr << "ERROR!!!:" << cudaGetErrorString(code) << " File: " << file << " Line: " << line << '\n';
    exit(-1);
}
// Fill the row-major operands: A (M x K) gets its own linear index,
// B (K x N) gets linear index + 1.
void init (double *A, double *B, int M , int N, int K)
{
    int idx = 0;
    for (int row = 0; row < M; ++row)
        for (int col = 0; col < K; ++col, ++idx)
            A[idx] = idx;
    idx = 0;
    for (int row = 0; row < K; ++row)
        for (int col = 0; col < N; ++col, ++idx)
            B[idx] = idx + 1;
}
// CPU reference GEMM: C[MxN] = A[MxK] * B[KxN], all row-major.
void matmul_double_host(double* A, double* B, double* C, int M, int N, int K)
{
    for (int row = 0; row < M; ++row)
    {
        for (int col = 0; col < N; ++col)
        {
            double acc = 0;
            for (int p = 0; p < K; ++p)
                acc += A[row * K + p] * B[p * N + col];
            C[row * N + col] = acc;
        }
    }
}
// GPU GEMM: C[MxN] = A[MxK] * B[KxN], row-major.  One thread computes one
// element of C.  Expected launch: a 2D grid of 2D blocks covering N
// columns in x and M rows in y (ragged edges are guarded below).
// (This body was a "complete code" stub in the original skeleton.)
__global__ void matmul_double(double* A, double* B , double* C, int M, int N, int K)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < M && col < N)
    {
        double tmp = 0;
        for (int p = 0; p < K; ++p)
            tmp += A[row * K + p] * B[p * N + col];
        C[row * N + col] = tmp;
    }
}
// Compare host and device MxN results element-wise; report (but do not
// abort on) any pair differing by more than an absolute 1e-3.
void validate (double *host, double *gpu, int M, int N)
{
    for (int i = 0; i < M; ++i)
    {
        for (int j = 0; j < N; ++j)
        {
            double h = host[i * N + j];
            double d = gpu[i * N + j];
            if (std::abs(h - d) > 1e-3)
            {
                std::cerr << "possible error at position " << i << ',' << j << " host: " << h << " device " << d << '\n';
            }
        }
    }
}
int main(int argc, char *argv[])
{
    // Usage: ./matmul_double M N K — computes C[MxN] = A[MxK] * B[KxN] on
    // CPU and GPU and cross-checks the two results.
    // BUG FIX: the original checked argc < 3 but reads argv[3] below.
    if(argc < 4)
    {
        std::cerr << "Usage: ./matmul_double M N K\n";
        exit(-1);
    }
    int M = std::atoi(argv[1]);
    int N = std::atoi(argv[2]);
    int K = std::atoi(argv[3]);
    /* Host alloc */
    double *hA = (double*) malloc (M * K * sizeof(double));
    double *hB = (double*) malloc (K * N * sizeof(double));
    double *hC = (double*) malloc (M * N * sizeof(double));
    double *dtohC = (double*) malloc (M * N * sizeof(double));
    /* Device alloc */
    double *dA, *dB, *dC;
    EC(cudaMalloc(&dA, M * K * sizeof(double)));
    EC(cudaMalloc(&dB, K * N * sizeof(double)));
    EC(cudaMalloc(&dC, M * N * sizeof(double)));
    /* Initialize host memory*/
    init(hA, hB, M, N, K);
    /* host compute */
    matmul_double_host(hA, hB, hC, M, N, K);
    /* Copy from host to device */
    EC(cudaMemcpy(dA, hA, M * K * sizeof(double), cudaMemcpyHostToDevice));
    EC(cudaMemcpy(dB, hB, K * N * sizeof(double), cudaMemcpyHostToDevice));
    /* call gpu kernel: 16x16 threads per block, grid covers N cols x M rows */
    dim3 block(16, 16);
    dim3 grid((N + block.x - 1) / block.x, (M + block.y - 1) / block.y);
    matmul_double<<<grid, block>>>(dA, dB, dC, M, N, K);
    EC(cudaGetLastError());
    /* Copy from device to host (dC -> dtohC); blocking copy syncs the kernel */
    EC(cudaMemcpy(dtohC, dC, M * N * sizeof(double), cudaMemcpyDeviceToHost));
    /* host vs device validation */
    validate(hC, dtohC, M, N);
    /* be clean */
    free(hA);
    free(hB);
    free(hC);
    free(dtohC);
    EC(cudaFree(dA));
    EC(cudaFree(dB));
    EC(cudaFree(dC));
    return 0;
}
|
extern "C"
// Per-task initialisation of the "domain pool" regression structures.
// One thread per task (1-D launch); taskIdx in [0, nBatch).  Each thread:
//   1. points DP/AP/IP[taskIdx] at that task's slices of the flat arrays
//      DA (design matrix), AA (covariance), IA (inverse placeholder),
//   2. zeroes AA and writes regularize^2 on its diagonal,
//   3. fills DA column 0 with ones (degree-0 term) and one column per
//      domain block at degree 1 (downsampled from data / dataRev),
//   4. derives higher-degree columns elementwise: D^n = D^1 * D^(n-1).
// Tasks in the first half of the batch read `data`; tasks in the second
// half read `dataRev` (the reversed signal).
__global__ void setDomainPoolKernel(
int nBatch, int rbs, int nDegree, int nD, int dScale, int rScale,
int expansion, int isCenAlign, float regularize,
float *data,float *dataRev, // array of data and reverse data
// flat backing arrays
float *DA,
float *AA,
float *IA,
// per-task pointer tables; entry i points into the flat array above
float **DP,
float **AP,
float **IP
)
{
int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (taskIdx < nBatch){
// initialize domain arrays
// array structure:
// DA = rbs rows, columns 1 + D_1^1 + D_2^1 + ... up to degree nDegree-1
// nCoeff is the number of polynomial coefficients (powers of the domains, starting from power 0)
// nDegree determines the number of degrees (maximum degree + 1)
// nD determines the number of domain blocks
// dScale determines the scale of the domain size compared to rbs
int nCoeff = ((nDegree - 1) * nD + 1);
// per-task offsets into the flat arrays
const int daOffset = taskIdx * rbs * rScale * nCoeff;
const int aaOffset = taskIdx * nCoeff * nCoeff;
const int iaOffset = taskIdx * nCoeff * nCoeff;
DP[taskIdx] = &DA[daOffset];
AP[taskIdx] = &AA[aaOffset];
IP[taskIdx] = &IA[iaOffset];
// initialize covariance matrix with regularization
for(int i = 0; i < nCoeff * nCoeff; i++){
AA[aaOffset + i] = 0.0f;
}
for(int i = 0; i < nCoeff * nCoeff; i+= nCoeff+1){
// set diagonal to regularization parameter
AA[aaOffset + i] = regularize * regularize;
}
// initialize first column of the design matrix (constant term)
for(int i = 0; i < rbs * rScale; i++){
DA[daOffset + i] = 1.0f; // power 0
}
int dIdx = taskIdx % (nBatch/2);
// for each block number dn
for(int dn = 1; dn <= nD; dn++){
// set reference domain block
// compute sumScale: total width of all previous domain blocks
int sumScale = 0;
for(int k = 1; k <= nD && k < dn; k++){
sumScale += (int) powf( (float) dScale, (float) (1 + expansion * (k - 1))) ;
}
int dnScale = (int) powf( (float) dScale, (float) (1 + expansion * (dn - 1)));
//int dnIdx = dIdx + rbs * sumScale; // * domain location factor
int dnIdx = dIdx;
if( isCenAlign == 0 ){
// if left aligned
dnIdx = dIdx + rbs * sumScale + 1;
} else {
// if center aligned
dnIdx = dIdx + rbs/2 * (1 - dnScale) + 1;
}
int padDA = rbs * rScale * dn; // column offset of block dn in DA
// initialize column dn-th index
for(int i = 0; i < rbs * rScale; i++){
DA[daOffset + padDA + i] = 0.0f; // power 1
}
// construct DA from domain blocks at power 1
// copy elements (out-of-range source indices contribute nothing)
for(int i = 0; i < rbs * rScale; i++){
int datIdx = dnIdx + i*dnScale/rScale;
if(datIdx >=0 && datIdx < (nBatch/2)){
if(taskIdx < (nBatch/2)){
DA[daOffset + padDA + i] =
DA[daOffset + padDA + i] + data[datIdx];
}else{ // gen reverse domain
DA[daOffset + padDA + i] =
DA[daOffset + padDA + i] + dataRev[datIdx];
}
}
}
// handling if domain blocks are larger than rbs (by downsample):
// accumulate the remaining samples of each downsampling window
for(int ds = 1; ds < dnScale / rScale; ds++){
// vec summation
for(int i = 0; i < rbs * rScale; i++){
int datIdx = dnIdx + ds + i*dnScale/rScale;
if(datIdx >=0 && datIdx < (nBatch/2)){
if(taskIdx < (nBatch/2)){
DA[daOffset + padDA + i] =
DA[daOffset + padDA + i] + data[datIdx];
}else{ // gen reverse domain
DA[daOffset + padDA + i] =
DA[daOffset + padDA + i] + dataRev[datIdx];
}
}
}
}
// vec scaling after resample (mean of each window)
for(int i = 0; i < rbs * rScale; i++){
DA[daOffset + padDA + i] = DA[daOffset + padDA + i] / (dnScale/rScale);
}
// calculate next degree
// NOTE(review): the paddings below use rbs * nD * (deg-2) and rbs * dn,
// while the degree-1 column above was placed at rbs * rScale * dn — these
// agree only when rScale == 1; confirm the intended layout for rScale > 1.
for(int deg = 2; deg <= nDegree - 1; deg++){
int degPad = rbs * nD * (deg - 2) + rbs * dn;
int nextDegPad = rbs * nD * (deg - 1) + rbs * dn;
for(int i = 0; i < rbs * rScale; i++){
// power n>=2
// D^n = D^1 * D^(n-1)
DA[daOffset + nextDegPad + i] =
DA[daOffset + rbs*dn + i] * DA[daOffset + degPad + i] ;
}
}
}
}
}
|
8,293 | #include "includes.h"
// Tree-reduce the per-q partial forces (f_ptxc/f_ptyc/f_ptzc, laid out as
// [num_atom2 x num_q2] with the atom index fastest) down to column 0, then
// have thread 0 of the block write the net force for the atom into Force
// (num_atom x 3), scaled by force_ramp.  One block per atom, blocks strided
// over gridDim.x.
// NOTE(review): the halving reduction assumes num_q2 is a power of two —
// confirm at the call site.
__global__ void force_calc ( float *Force, int num_atom, int num_q, float *f_ptxc, float *f_ptyc, float *f_ptzc, int num_atom2, int num_q2, int *Ele, float force_ramp) {
// Do column tree sum of f_ptxc for f_ptx for every atom, then assign threadIdx.x == 0 (3 * num_atoms) to Force. Force is num_atom * 3.
if (blockIdx.x >= num_atom) return; // uniform per block: whole block exits together, barriers below stay safe
for (int ii = blockIdx.x; ii < num_atom; ii += gridDim.x) {
// halve the number of live columns each pass
for (int stride = num_q2 / 2; stride > 0; stride >>= 1) {
__syncthreads(); // all writes of the previous pass must land first
for(int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x) {
f_ptxc[ii + iAccum * num_atom2] += f_ptxc[ii + iAccum * num_atom2 + stride * num_atom2];
f_ptyc[ii + iAccum * num_atom2] += f_ptyc[ii + iAccum * num_atom2 + stride * num_atom2];
f_ptzc[ii + iAccum * num_atom2] += f_ptzc[ii + iAccum * num_atom2 + stride * num_atom2];
}
}
__syncthreads();
if (threadIdx.x == 0) {
// only atoms with a nonzero Ele entry receive a force
if (Ele[ii]) {
Force[ii*3 ] = f_ptxc[ii] * force_ramp;
Force[ii*3 + 1] = f_ptyc[ii] * force_ramp;
Force[ii*3 + 2] = f_ptzc[ii] * force_ramp;
}
}
__syncthreads();
}
}
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
using namespace std;
// Debug kernel: prints the first `outputsize` ints of `index`,
// space-separated, one element per thread.
__global__ void printArray(int *index, int outputsize) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= outputsize)
        return;
    printf(" %d", index[tid]);
}
// Debug kernel: prints the first `outputsize` doubles of `index`,
// space-separated, one element per thread.
__global__ void printArray(double *index, int outputsize) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= outputsize)
        return;
    printf(" %f", index[tid]);
}
// Histogram kernel: counts occurrences of each (1-based) term id in A,
// accumulating into Df[term-1] with atomics.
// NOTE(review): there is no `i < n` guard — every launched thread
// dereferences A[i], so gridDim.x * blockDim.x must not exceed the length
// of A; verify at the call site.
__global__ void Count_number_of_term(int *A, int *Df) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int value = A[i] - 1; // term ids are 1-based, buckets 0-based
atomicAdd(&Df[value], 1); // atomic: many threads may share a term
}
// Kogge–Stone inclusive scan over `df`, shifted by one on output so that
// index[j+1] holds the inclusive prefix sum of df[0..j] (index[0] untouched).
// NOTE(review): shared XY is a fixed 100 ints — requires blockDim.x <= 100.
// NOTE(review): threads with i >= InputSize skip the load, leaving their XY
// slot uninitialized for the scan loop; safe only when every launched thread
// satisfies i < InputSize — verify at the call site.
// NOTE(review): there is no cross-block carry, so the result is only correct
// for a single-block launch.
// `temp` is accepted but unused.
__global__ void Kogge_Stone_scan_kernel(int *df, int *index, int InputSize, int thread_num, int *temp) {
__shared__ int XY[100];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < InputSize) {
XY[threadIdx.x] = df[i];
__syncthreads(); // NOTE(review): barrier inside a divergent branch — UB if any thread has i >= InputSize
}
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads(); // finish reads of the previous round before overwriting
if (threadIdx.x >= stride) {
XY[threadIdx.x] += XY[threadIdx.x - stride];
}
}
if (i < thread_num-1) {
index[i + 1] = XY[threadIdx.x];
}
__syncthreads();
}
// Fills InvertedIndexA so that slots [Index[t], Index[t] + Df[t]) hold the
// 1-based term id t+1.  One thread per term; only block 0 does the work.
// A and B are accepted for signature symmetry but unused.
// Fix: the original called __syncthreads() inside a loop whose trip count
// differs per thread (and inside the blockIdx branch) — undefined behaviour
// in CUDA.  The barrier guarded nothing (each thread writes a disjoint
// slice), so it has been removed.
__global__ void Create_InvertedIndexA (int *A, int *B, int *Df, int *Index ,int *InvertedIndexA) {
    int end = Index[threadIdx.x] + Df[threadIdx.x];
    if (blockIdx.x == 0) {
        for (int i = Index[threadIdx.x]; i < end; i++) {
            InvertedIndexA[i] = threadIdx.x + 1;
        }
    }
}
// Scatters doc ids (A) and norms (C) into the inverted index, using Index[]
// as a per-term write cursor advanced atomically.  Df is accepted but unused.
// NOTE(review): no bounds guard on i — the launch must satisfy
// gridDim.x * blockDim.x <= number of (doc,term) pairs; verify at call site.
// Cleanup: removed a dead local (`temp`, written but never read) and three
// __syncthreads() calls that guarded no shared-memory traffic.
__global__ void Create_InvertedIndexB(int *A, int *B, double *C, int *Df, int *Index, int *InvertedIndexB, double *InvertedIndexC) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // reserve the next free slot in term B[i]'s posting list
    int slot = atomicAdd(&Index[B[i] - 1], 1);
    InvertedIndexB[slot] = A[i];
    InvertedIndexC[slot] = C[i];
}
// KNN Start
// Accumulates query-document dot products: for query-term slot i, walks that
// term's posting list [index[t], index[t+1]) and adds qnorm[i] * norms[j]
// into docs[doc].
// Fix: the original issued __syncthreads() inside a loop whose trip count
// differs per thread — undefined behaviour in CUDA; the barrier (which
// guarded nothing) has been removed.  An unused local was also dropped.
// NOTE(review): two threads whose posting lists share a document race on
// docs[...] += ...; switch to atomicAdd(double*) (SM60+) if exact sums
// matter.
__global__ void knnTD(int *terms, double *qnorm, int *invertedIndex, double *norms, double *docs, int *index) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    for (int j = index[terms[i]]; j < index[terms[i] + 1]; j++) {
        docs[invertedIndex[j]] += qnorm[i] * norms[j];
    }
}
// Normalises the accumulated dot products into cosine similarities:
// docs[i] /= (docNorm[i] * queryNorm), mapping degenerate (zero-norm)
// cases to 0.  `terms` and `qnorm` are accepted but unused.
__global__ void knn(int* terms, double *qnorm, double *docs, double *docNorm, double queryNorm) {
    int d = blockDim.x * blockIdx.x + threadIdx.x;
    bool degenerate = (docNorm[d] == 0) || (queryNorm == 0);
    docs[d] = degenerate ? 0 : docs[d] / (docNorm[d] * queryNorm);
}
// Debug kernel: dumps the posting list of the term handled by this thread
// (prints the list's start offset, then each term -> doc pair).
__global__ void printlist(int *terms, int *invertedIndex, int *index) {
    int t = blockDim.x * blockIdx.x + threadIdx.x;
    int begin = index[terms[t]];
    int end = index[terms[t] + 1];
    printf("Term -> %d\n", begin);
    for (int j = begin; j < end; j++)
        printf("%d -> %d\n", terms[t], invertedIndex[j]);
    printf("\n");
}
// Computes the L2 norm of document i over all `num` (doc, norm) pairs:
// dn[i] = sqrt(sum of norms[j]^2 where docs[j] == i).  One thread per doc.
__global__ void getDocNorm(int *docs, int *terms, double *norms, double *dn, int num) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    double sumSquares = 0.0;
    for (size_t j = 0; j < num; j++) {
        if (docs[j] != i)
            continue;
        sumSquares += norms[j] * norms[j];
    }
    __syncthreads();
    dn[i] = sqrt(sumSquares);
}
// In-place DESCENDING odd-even transposition sort of `data`, carrying a
// 1-based label array `dl` along with the values (dl is initialised here to
// tid+1, i.e. the pre-sort position).
// Single-block launch is assumed: the termination test uses
// __syncthreads_count across the whole sort loop, which only synchronises
// one block.
// NOTE(review): tid_idx_max is based on blockDim.x * 2, so each thread owns
// one even and one odd pair — verify num_elem <= 2 * blockDim.x at the call
// site.
__global__ void oddEvenSort(double *data, int *dl, int num_elem) {
int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
int tid_idx;
int offset = 0; //Start off with even, then odd
int num_swaps;
dl[tid] = tid+1;
__syncthreads();
//Calculation maximum index for a given block
//Last block it is number of elements minus one
//Other blocks to end of block minus one
int tid_idx_max = min((((blockIdx.x + 1)*(blockDim.x * 2)) - 1), (num_elem - 1));
do
{
//Reset number of swaps
num_swaps = 0;
//work out index of data
tid_idx = (tid * 2) + offset;
//If no array or block overrun
if (tid_idx < tid_idx_max) {
//Read values into registers
double d0 = data[tid_idx];
int db0 = dl[tid_idx];
double d1 = data[tid_idx + 1];
int db1 = dl[tid_idx + 1];
//Compare registers
if (d0 < d1) {
//Swap values if needed (larger value first -> descending order)
data[tid_idx] = d1;
dl[tid_idx] = db1;
data[tid_idx + 1] = d0;
dl[tid_idx + 1] = db0;
//keep track that we did a swap
num_swaps++;
}
}
//Switch from even to odd, or odd to even
if (offset == 0) {
offset = 1;
}
else {
offset = 0;
}
} while (__syncthreads_count(num_swaps) != 0); // stop when a full pass makes no swap anywhere in the block
}
// Voting step: for the unique label u[i], accumulate into dc[i] the
// similarity scores dn[j] of the top-k documents whose label (dl[j]/10)
// matches.  One thread per unique label.
__global__ void classify(int *dl, double *dn, int k, double *dc, int *u) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    for (size_t j = 0; j < k; j++) {
        if (dl[j] / 10 != u[i])
            continue;
        dc[i] += dn[j];
    }
}
// KNN end
/*
 * Inverted-index construction + cosine-similarity kNN classification demo.
 * Reads (doc, term, norm) triples from result_norm.txt and a query from
 * querydoc.txt, builds the inverted index on the GPU (managed memory),
 * scores every document against the query, sorts descending, and votes over
 * the top k neighbours.
 *
 * Fixes relative to the original:
 *  - the "Norms:" dump printed invertedIndexB with %d; it now prints
 *    invertedIndexC with %f as intended,
 *  - docSums and kCount were copied to the device uninitialized and then
 *    accumulated into (+=) by knnTD/classify; they are now zeroed first,
 *  - a sizeof result was printed with %d (UB for size_t); now %zu,
 *  - cudaFree(temp) and cudaFree(u) were missing; an unused local was
 *    removed.
 */
int main(int argc, char **argv)
{
    int k;                              // neighbourhood size
    printf("%d\n", argc);
    if(argc < 2) {
        k = 3;                          // default k
    } else {
        k = atoi(argv[1]);
    }
    // Read term-doc pairs
    ifstream ifs("result_norm.txt");
    string text;
    text.assign( (istreambuf_iterator<char>(ifs) ),
                 (istreambuf_iterator<char>() ) );
    char arr[text.size()+1];            // VLA (GNU extension)
    strcpy(arr,text.c_str());
    vector<char*> v;
    vector<int> d1;                     // doc ids
    vector<int> t1;                     // term ids
    vector<double> n1;                  // per-pair norms
    char* chars_array = strtok(arr, "[");
    while(chars_array) {
        v.push_back(chars_array);
        chars_array = strtok(NULL, "[");
    }
    bool firstTerm = true, firstNorm = true;
    for(size_t n = 0; n < v.size(); ++n)
    {
        char* subchar_array = strtok(v[n], ",");
        while (subchar_array) {
            if (n == 0) {
                d1.push_back(atoi(subchar_array));
            } else if (n == 1) {
                if (firstTerm){
                    // first token of section 1 is really the tail of section 0
                    d1.pop_back();
                    firstTerm = false;
                }
                t1.push_back(atoi(subchar_array));
            } else if (n == 2) {
                if (firstNorm){
                    t1.pop_back();
                    firstNorm = false;
                }
                if (n1.size() == d1.size())
                    break;
                n1.push_back(atof(subchar_array));
            }
            subchar_array = strtok(NULL, ",");
        }
    }
    int d[d1.size()];
    int t[t1.size()];
    double n[n1.size()];
    copy(d1.begin(), d1.end(), d);
    copy(t1.begin(), t1.end(), t);
    copy(n1.begin(), n1.end(), n);
    /*
    for (size_t i = 0; i < t1.size(); i++) {
        printf("%d -> [%d,%d,%f]\n",i,d[i],t[i],n[i]);
    }
    */
    // Begin InvertedIndex algorithm
    int numDocs = d[d1.size()-1];       // docs are numbered 1..numDocs
    const int arraySize = sizeof(d)/sizeof(int);
    printf("ArraySize: %d\n", arraySize);
    const int number_term = 7;          // hard-coded vocabulary size
    int Df[number_term] = { 0 };        // per-term document frequency
    int Index[number_term] = { 0 };     // per-term start offsets
    vector<int> IA(arraySize,0);
    vector<int> IB(arraySize,0);
    vector<double> IC(arraySize,0);
    int InvertedIndexA[arraySize];//output
    int InvertedIndexB[arraySize];//output
    double InvertedIndexC[arraySize];//output
    copy(IA.begin(),IA.end(),InvertedIndexA);
    copy(IB.begin(),IB.end(),InvertedIndexB);
    copy(IC.begin(),IC.end(),InvertedIndexC);
    printf("A: %zu\n", sizeof(InvertedIndexA)/sizeof(int));
    int thread_num = d[arraySize - 1];
    int blocks = (arraySize / thread_num) + (arraySize % thread_num != 0 ? 1 : 0);
    printf("blocks = %d\n", blocks);
    int *a, *b, *df, *index, *invertedIndexA, *invertedIndexB;
    double *c, *invertedIndexC, *dn;
    double docNorms[numDocs];           // filled on the device by getDocNorm
    cudaMallocManaged(&a, sizeof(d));
    cudaMallocManaged(&b, sizeof(t));
    cudaMallocManaged(&c, sizeof(n));
    cudaMallocManaged(&df, sizeof(Df));
    cudaMallocManaged(&index, sizeof(Index));
    cudaMallocManaged(&invertedIndexA, sizeof(InvertedIndexA));
    cudaMallocManaged(&invertedIndexB, sizeof(InvertedIndexB));
    cudaMallocManaged(&invertedIndexC, sizeof(InvertedIndexC));
    cudaMallocManaged(&dn,sizeof(docNorms));
    cudaMemcpy(a, d, sizeof(d), cudaMemcpyHostToDevice);
    cudaMemcpy(b, t, sizeof(t), cudaMemcpyHostToDevice);
    cudaMemcpy(c, n, sizeof(n), cudaMemcpyHostToDevice);
    cudaMemcpy(df, Df, sizeof(Df), cudaMemcpyHostToDevice);
    cudaMemcpy(index, Index, sizeof(Index), cudaMemcpyHostToDevice);
    cudaMemcpy(invertedIndexA, InvertedIndexA, sizeof(InvertedIndexA), cudaMemcpyHostToDevice);
    cudaMemcpy(invertedIndexB, InvertedIndexB, sizeof(InvertedIndexB), cudaMemcpyHostToDevice);
    cudaMemcpy(invertedIndexC, InvertedIndexC, sizeof(InvertedIndexC), cudaMemcpyHostToDevice);
    cudaMemcpy(dn,docNorms,sizeof(docNorms),cudaMemcpyHostToDevice);
    int Temp[number_term] = { 0 };
    int *temp;
    cudaMallocManaged(&temp, sizeof(Temp));
    cudaMemcpy(temp, Temp, sizeof(Temp), cudaMemcpyHostToDevice);
    printf("Initial Array:\n");
    printf("d:");
    printArray <<<1, arraySize>>> (a, sizeof(d) / sizeof(int));
    cudaDeviceSynchronize();
    printf("\n");
    printf("t:");
    printArray <<<1, arraySize >>> (b, sizeof(t) / sizeof(int));
    cudaDeviceSynchronize();
    printf("\n");
    printf("Count_number_of_term: \n");
    Count_number_of_term <<<blocks, thread_num>>> (b,df);
    cudaDeviceSynchronize();
    printArray <<<1, thread_num>>> (df, sizeof(Df) / sizeof(int));
    cudaDeviceSynchronize();
    printf("\n");
    printf("Execute the prefix sum by Kogge Stone:\n");
    Kogge_Stone_scan_kernel <<<blocks, thread_num>>> (df, index, arraySize, thread_num, temp);
    cudaDeviceSynchronize();
    printArray <<<1, arraySize >>> (index, sizeof(Index) / sizeof(int));
    cudaDeviceSynchronize();
    printf("\n");
    printf("InvertedIndex Array:\n");
    Create_InvertedIndexA <<<1, thread_num >>> (a, b, df, index, invertedIndexA);
    cudaDeviceSynchronize();
    printf("Terms: \n");
    for (size_t j = 0; j < arraySize; j++) {
        printf(" %d", invertedIndexA[j]);
    }
    printf("\n\n");
    printf("Documents: \n");
    Create_InvertedIndexB <<<blocks, thread_num >>> (a, b, c, df, index, invertedIndexB, invertedIndexC);
    cudaDeviceSynchronize();
    for (size_t j = 0; j < arraySize; j++) {
        printf(" %d", invertedIndexB[j]);
    }
    printf("\n\n");
    printf("Norms: \n");
    for (size_t j = 0; j < arraySize; j++) {
        // fixed: was printing invertedIndexB with %d
        printf(" %f", invertedIndexC[j]);
    }
    printf("\n\n");
    getDocNorm<<<1,numDocs>>>(a,b,c,dn,d1.size());
    cudaDeviceSynchronize();
    //Start Querying
    ifstream ifq("querydoc.txt");
    string qur;
    qur.assign( (istreambuf_iterator<char>(ifq) ),
                (istreambuf_iterator<char>() ) );
    char qarr[qur.size()+1];
    strcpy(qarr,qur.c_str());
    vector<char*> vq;
    vector<int> tq;                     // query term ids
    vector<double> tf;                  // query term frequencies
    char* query_array = strtok(qarr, "[");
    while(query_array) {
        vq.push_back(query_array);
        query_array = strtok(NULL, "[");
    }
    for(size_t n = 0; n < vq.size(); ++n)
    {
        char* subchar_array = strtok(vq[n], ",");
        while (subchar_array) {
            if (n == 0)
                tq.push_back(atoi(subchar_array));
            else if (n == 1)
                tf.push_back(atof(subchar_array));
            subchar_array = strtok(NULL, ",");
        }
    }
    int q_size = tq.size();
    int qterm[q_size];
    double qtermfreq[tf.size()];
    copy(tq.begin(), tq.end(), qterm);
    copy(tf.begin(), tf.end(), qtermfreq);
    int *qtptr;
    double *qfptr, *ds;
    double docSums[numDocs];
    // fixed: knnTD accumulates with += — the buffer must start at zero
    for (int j = 0; j < numDocs; j++) docSums[j] = 0.0;
    cudaMallocManaged(&qtptr,q_size*sizeof(int));
    cudaMallocManaged(&qfptr,q_size*sizeof(double));
    cudaMallocManaged(&ds,sizeof(docSums));
    cudaMemcpy(qtptr,qterm,q_size *sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(qfptr,qtermfreq,q_size *sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(ds,docSums,sizeof(docSums),cudaMemcpyHostToDevice);
    double q_norm = 0;
    for (size_t j = 0; j < q_size; j++) {
        q_norm+=(qtermfreq[j]*qtermfreq[j]);
    }
    q_norm = sqrt(q_norm);
    knnTD<<<1,q_size>>>(qtptr,qfptr,invertedIndexB,invertedIndexC,ds,index);
    cudaDeviceSynchronize();
    knn<<<1,numDocs>>>(qtptr,qfptr,ds,dn,q_norm);
    cudaDeviceSynchronize();
    /*
    printf("\n\nDoc Distances:\n");
    for (size_t j = 0; j < numDocs; j++) {
        printf(" %d -> %f\n",j+1,ds[j]);
    }
    */
    int docLabel[numDocs];              // filled on the device by oddEvenSort
    int *dl;
    cudaMallocManaged(&dl,sizeof(docLabel));
    cudaMemcpy(dl,docLabel,sizeof(docLabel),cudaMemcpyHostToDevice);
    oddEvenSort<<<1,numDocs>>>(ds,dl,numDocs);
    cudaDeviceSynchronize();
    vector<int> nn;                     // unique labels among the top k
    printf("\nK Nearest Neighbors (k=%d): \n", k);
    for (size_t j = 0; j < k; j++) {
        if (find(nn.begin(), nn.end(), dl[j]) != nn.end()) {
        } else {
            nn.push_back(dl[j]/10);     // label = doc id / 10
        }
        printf(" %d -> %f -> label = %d\n", dl[j],ds[j],dl[j]/10);
    }
    int uniqueN[nn.size()];
    copy(nn.begin(), nn.end(), uniqueN);
    double kCount[nn.size()];
    // fixed: classify accumulates with += — the buffer must start at zero
    for (size_t j = 0; j < nn.size(); j++) kCount[j] = 0.0;
    double *dc;
    int *u;
    cudaMallocManaged(&dc,sizeof(kCount));
    cudaMallocManaged(&u,sizeof(uniqueN));
    cudaMemcpy(dc,kCount,sizeof(kCount),cudaMemcpyHostToDevice);
    cudaMemcpy(u,uniqueN,sizeof(uniqueN),cudaMemcpyHostToDevice);
    classify<<<1,nn.size()>>>(dl,ds,k,dc,u);
    cudaDeviceSynchronize();
    double max = 0;
    int max_i = 0;
    for (size_t j = 0; j < nn.size(); j++) {
        if(dc[j] > max) {
            max = dc[j];
            max_i = j;
        }
    }
    printf("\nQuery Document is labelled = %d\n", u[max_i]);
    printf("\n");
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    cudaFree(df);
    cudaFree(index);
    cudaFree(invertedIndexA);
    cudaFree(invertedIndexB);
    cudaFree(invertedIndexC);
    cudaFree(temp);                     // fixed: was leaked
    cudaFree(qtptr);
    cudaFree(qfptr);
    cudaFree(ds);
    cudaFree(dn);
    cudaFree(dl);
    cudaFree(dc);
    cudaFree(u);                        // fixed: was leaked
    return 0;
}
|
8,295 | #include <cstdio>
// CUDA_CHECK(x): evaluate x; in debug builds (NDEBUG not defined) then poll
// cudaGetLastError() and exit with file/line context on any failure.
// Note: cudaGetLastError() returns (and clears) the last sticky error, so
// this also surfaces errors left over from earlier unchecked calls.
#if defined(NDEBUG)
#define CUDA_CHECK(x) (x)
#else
#define CUDA_CHECK(x) do {\
(x); \
cudaError_t e = cudaGetLastError(); \
if (cudaSuccess != e) { \
printf("cuda failure \"%s\" at %s:%d\n", \
cudaGetErrorString(e), \
__FILE__, __LINE__); \
exit(1); \
} \
} while (0)
#endif
// kernel program for the device (GPU): compiled by NVCC
// Element-wise vector add: c[i] = a[i] + b[i], one element per thread.
// Assumes a single-block 1-D launch with exactly one thread per element.
__global__ void addKernel(int* c, const int* a, const int* b) {
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// main program for the CPU: compiled by MS-VC++
// Host driver: adds two 5-element vectors on the GPU and prints operands
// and result.  All CUDA calls are wrapped in CUDA_CHECK.
int main(void) {
    const int SIZE = 5;
    const size_t bytes = SIZE * sizeof(int);
    // host-side operands and result
    const int a[SIZE] = { 1, 2, 3, 4, 5 };
    const int b[SIZE] = { 10, 20, 30, 40, 50 };
    int c[SIZE] = { 0 };
    // device-side buffers
    int* d_a = 0;
    int* d_b = 0;
    int* d_c = 0;
    CUDA_CHECK( cudaMalloc((void**)&d_a, bytes) );
    CUDA_CHECK( cudaMalloc((void**)&d_b, bytes) );
    CUDA_CHECK( cudaMalloc((void**)&d_c, bytes) );
    // upload the operands
    CUDA_CHECK( cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice) );
    CUDA_CHECK( cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice) );
    // one thread per element; check the launch itself without clearing errors
    addKernel<<<1, SIZE>>>(d_c, d_a, d_b);
    CUDA_CHECK( cudaPeekAtLastError() );
    // download the result (this memcpy synchronizes with the kernel)
    CUDA_CHECK( cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost) );
    // release device memory
    CUDA_CHECK( cudaFree(d_c) );
    CUDA_CHECK( cudaFree(d_a) );
    CUDA_CHECK( cudaFree(d_b) );
    // report
    printf("{%d,%d,%d,%d,%d} + {%d,%d,%d,%d,%d} = {%d,%d,%d,%d,%d}\n",
           a[0], a[1], a[2], a[3], a[4],
           b[0], b[1], b[2], b[3], b[4],
           c[0], c[1], c[2], c[3], c[4]);
    return 0;
}
|
8,296 | //PROGRAMA QUE SUMA DOS MATRICES EN C++
#include<malloc.h>
#include<cuda.h>
#define Size 1024
// Element-wise sum of two square row-major matrices: C = A + B
// (width x width), one element per thread on a 2-D launch.
__global__ void SumaMatricesCU(int* A,int* B,int* C,int width){
    int x = blockDim.x * blockIdx.x + threadIdx.x;  // column
    int y = blockDim.y * blockIdx.y + threadIdx.y;  // row
    if (x >= width || y >= width)
        return;                                     // grid may overhang the matrix
    int idx = y * width + x;
    C[idx] = A[idx] + B[idx];
}
// Prints a width x width row-major matrix, one row per line.  Values are
// printed back-to-back with no separator (matches the original format).
void imprimeMatriz(int* A, int width){
    for (int fila = 0; fila < width; fila++) {
        for (int col = 0; col < width; col++)
            printf("%d", A[fila * width + col]);
        printf("\n");
    }
}
// Fills a width x width matrix with ones.
void inicializaMatriz(int* X, int width)
{
    const int total = width * width;
    for (int idx = 0; idx < total; idx++)
        X[idx] = 1;
}
/*
 * Adds two width x width integer matrices on the GPU and prints the result.
 *
 * Fixes relative to the original:
 *  - the grid dimensions used ceil(width/32) where width/32 is an INTEGER
 *    division — it truncates before ceil() ever sees a fraction, dropping
 *    the tail when width is not a multiple of 32.  Replaced by the integer
 *    ceiling division (width + 31) / 32,
 *  - the host->device copy of the uninitialized result buffer h_C was
 *    removed (the kernel overwrites every element of d_C).
 */
int main()
{
    cudaError_t error = cudaSuccess;
    int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int width = 2048;
    int size = width * width * sizeof(int);
    /* host-side buffers */
    h_A = (int*)malloc(size);
    h_B = (int*)malloc(size);
    h_C = (int*)malloc(size);
    /* fill both operands with ones */
    inicializaMatriz(h_A, width);
    inicializaMatriz(h_B, width);
    /* device-side buffers */
    error = cudaMalloc((void**)&d_A, size);
    if (error != cudaSuccess) {
        printf("Error reservando memoria para d_M");
        exit(0);
    }
    error = cudaMalloc((void**)&d_B, size);
    if (error != cudaSuccess) {
        printf("Error reservando memoria para d_N");
        exit(0);
    }
    error = cudaMalloc((void**)&d_C, size);
    if (error != cudaSuccess) {
        printf("Error reservando memoria para d_P");
        exit(0);
    }
    /* upload the operands */
    error = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) {
        printf("Error COPIANDO memoria para d_A");
        exit(0);
    }
    error = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) {
        printf("Error COPIANDO memoria para d_B");
        exit(0);
    }
    /* 32x32 = 1024 threads per block; the integer ceiling division ensures
       the grid covers the whole matrix even when width % 32 != 0 */
    dim3 dimblock(32, 32, 1);
    dim3 dimGrid((width + 31) / 32, (width + 31) / 32, 1);
    SumaMatricesCU<<<dimGrid, dimblock>>>(d_A, d_B, d_C, width);
    cudaDeviceSynchronize();    /* wait for the kernel before reading d_C */
    error = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) {
        printf("Error copiando d to h memoria para d_C");
        exit(0);
    }
    imprimeMatriz(h_C, width);
    free(h_A); free(h_B); free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
8,297 | #include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Copies `batch_size` complex values from `in` to `out`.  The same element
// is rewritten `load` times — a knob for generating extra memory traffic.
__global__ void
apply_copy_kernel(cuFloatComplex* in, cuFloatComplex* out, int batch_size, int load = 1)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= batch_size)
        return;
    for (int rep = 0; rep < load; rep++) {
        out[idx].x = in[idx].x;
        out[idx].y = in[idx].y;
    }
}
// Launches apply_copy_kernel on `stream`.  The batch size is implied by the
// launch configuration: grid_size * block_size elements are copied.
void apply_copy(cuFloatComplex* in,
                cuFloatComplex* out,
                int grid_size,
                int block_size,
                int load,
                cudaStream_t stream)
{
    const int n = grid_size * block_size;
    apply_copy_kernel<<<grid_size, block_size, 0, stream>>>(in, out, n, load);
}
// Queries the occupancy calculator for the grid/block sizes that maximise
// occupancy of apply_copy_kernel (no dynamic shared memory, no block-size
// cap).
void get_block_and_grid(int* minGrid, int* minBlock)
{
cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, apply_copy_kernel, 0, 0);
}
8,298 | #include <iostream>
#include <string>
#include <cuda_runtime.h>
using namespace std;
// Translates a device's major compute-capability number into the marketing
// name of its GPU architecture generation.
string getDeviceArchitecture( cudaDeviceProp devProp )
{
    switch( devProp.major )
    {
        case 2:  return "Fermi";
        case 3:  return "Kepler";
        case 5:  return "Maxwell";
        case 6:  return "Pascal";
        case 7:  return "Volta or Turing";
        case 8:  return "Ampere";
        default: return "Unknown device type";
    }
}
// Returns the total number of FP32 CUDA cores on the device: SM count times
// the per-SM core count for the architecture generation (mirrors the
// _ConvertSMVer2Cores table in the CUDA samples' helper_cuda.h).
// Extended: the original table stopped at Ampere 8.6; Ada Lovelace (8.9)
// and Hopper (9.x) entries have been added.
int getSPcores( cudaDeviceProp devProp )
{
    int cores = 0;
    int mp = devProp.multiProcessorCount;   // number of SMs
    switch( devProp.major )
    {
        case 2: // Fermi
            if( devProp.minor == 1 ) cores = mp * 48;
            else cores = mp * 32;
            break;
        case 3: // Kepler
            cores = mp * 192;
            break;
        case 5: // Maxwell
            cores = mp * 128;
            break;
        case 6: // Pascal
            if( ( devProp.minor == 1 ) || ( devProp.minor == 2 ) ) cores = mp * 128;
            else if( devProp.minor == 0 ) cores = mp * 64;
            else cout << "Unknown device type\n";
            break;
        case 7: // Volta (7.0) and Turing (7.5)
            if( ( devProp.minor == 0 ) || ( devProp.minor == 5 ) ) cores = mp * 64;
            else cout << "Unknown device type\n";
            break;
        case 8: // Ampere (8.0, 8.6) and Ada Lovelace (8.9)
            if( devProp.minor == 0 ) cores = mp * 64;
            else if( ( devProp.minor == 6 ) || ( devProp.minor == 9 ) ) cores = mp * 128;
            else cout << "Unknown device type\n";
            break;
        case 9: // Hopper
            cores = mp * 128;
            break;
        default:
            cout << "Unknown device type\n";
            break;
    }
    return cores;
}
// Prints a human-readable summary of CUDA device i's properties to stdout.
// NOTE(review): the return status of cudaGetDeviceProperties is not checked;
// an invalid index leaves devProp unspecified.
void printDevProp( int i )
{
cudaDeviceProp devProp;
cudaGetDeviceProperties( &devProp, i );
cout << " - ASCII string identifying device: " << devProp.name << "\n";
cout << " - Device architecture name: " << getDeviceArchitecture( devProp ) << "\n";
cout << " - Major compute capability: " << devProp.major << "\n";
cout << " - Minor compute capability: " << devProp.minor << "\n";
cout << " - Number of multiprocessors on device: " << devProp.multiProcessorCount << "\n";
cout << " - Number of CUDA cores: " << getSPcores( devProp ) << "\n";
cout << " - Global memory available on device in bytes: " << devProp.totalGlobalMem << "\n";
cout << " - Shared memory available per block in bytes: " << devProp.sharedMemPerBlock << "\n";
cout << " - 32-bit registers available per block: " << devProp.regsPerBlock << "\n";
cout << " - Warp size in threads: " << devProp.warpSize << "\n";
cout << " - Maximum pitch in bytes allowed by memory copies: " << devProp.memPitch << "\n";
cout << " - Maximum number of threads per block: " << devProp.maxThreadsPerBlock << "\n";
// (the loop index below intentionally shadows the parameter i)
for( int i = 0 ; i < 3 ; ++i )
cout << " - Maximum dimension " << i << " of the grid: " << devProp.maxGridSize[i] << "\n";
for ( int i = 0 ; i < 3 ; ++i )
cout << " - Maximum dimension " << i << " of the block: " << devProp.maxThreadsDim[i] << "\n";
cout << " - Clock frequency in kilohertz: " << devProp.clockRate << "\n";
cout << " - Constant memory available on device in bytes: " << devProp.totalConstMem << "\n";
cout << " - Number of asynchronous engines: " << devProp.asyncEngineCount << "\n";
cout << " - Specified whether there is a run time limit on kernels: " << devProp.kernelExecTimeoutEnabled << "\n";
cout << " - Alignment requirement for textures: " << devProp.textureAlignment << "\n";
}
// Enumerates all CUDA devices and prints their properties.
// Fix: cudaGetDeviceCount was unchecked — on failure (no driver/device) the
// count was left uninitialized; it is now reported and treated as zero.
int main( int argc, char* argv[] )
{
    int devCount = 0;
    cudaError_t err = cudaGetDeviceCount( &devCount );
    if( err != cudaSuccess )
    {
        cout << "cudaGetDeviceCount failed: " << cudaGetErrorString( err ) << "\n";
        devCount = 0;
    }
    cout << "##################################################\n";
    cout << "\t > CUDA Device Specifications <\n";
    cout << "\t (Total CUDA devices: " << devCount << ")\n";
    for ( int i = 0 ; i < devCount ; ++i )
    {
        cout << "##################################################\n";
        cout << "+ CUDA device: " << i << "\n";
        printDevProp( i );
        cout << "##################################################\n\n";
    }
    system( "pause" );   // Windows-only pause; prints an error message elsewhere
    return 0;
}
|
8,299 | #include "includes.h"
// filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
extern "C" // ensure function name to be exactly "vmultbang"
{
}
// NOTE: despite the gaxpy name, this kernel ignores a and b entirely and
// simply writes each element's own zero-based index into c[i].
__global__ void gaxpy4(const int n, const double *a, const double *b, double *c)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = (double) idx; // zero-based, per C indexing
}
8,300 | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Forward-Euler integration of y' = 9t^2 - 4t + 5 over t in [0, 10] with
// step delta_t, starting from y(t_0) = y_0.  The trajectory is discarded —
// this function exists purely as a CPU timing benchmark.
void CPUEuler(float t_0, float y_0, float delta_t){
    int steps = 10 / delta_t + 1;
    float *trace = (float*) malloc(sizeof(float) * steps);
    trace[0] = y_0;
    for (int k = 0; k + 1 < steps; k++) {
        float t = k * delta_t;
        trace[k + 1] = trace[k] + delta_t * (9 * powf(t, 2) - 4 * t + 5);
    }
    free(trace);
    //return y;
}
// Times CPUEuler for step sizes 10^-1 .. 10^-6 and prints each runtime
// in milliseconds.
int main(int argc, char const *argv[]) {
    printf("seccion 1.a\n");
    clock_t t_begin, t_end;
    for (int e = 1; e < 7; e++) {
        float step = powf(10, -e);
        t_begin = clock();
        CPUEuler(0, 4, step);
        t_end = clock();
        double elapsed_ms = ((double)(t_end - t_begin)) * 1000 / CLOCKS_PER_SEC;
        printf("%f\n", elapsed_ms);
    }
    return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.