serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
11,901
// Count letter frequencies of a text file three ways -- on the CPU, on the GPU
// with per-thread tallies in global memory, and on the GPU with per-thread
// tallies in shared memory -- and compare the run times.
#include <stdio.h>
#include <stdint.h>
#include <iostream>
#include <string>
#include <cstring>
#include <chrono>

#define NUM_THREADS 448
#define NUM_CHARS 26
// Parenthesized so the macro expands safely next to other operators.
#define SHARED_MEM_SIZE (NUM_THREADS * NUM_CHARS)

// How many pad bytes make fileSize a multiple of NUM_THREADS.
// Returns 0 for an already-aligned size (the original padded a full extra
// chunk in that case -- harmless, since unpadResult subtracted it, but wasteful).
size_t calculatePadding(size_t fileSize) {
    return (NUM_THREADS - (fileSize % NUM_THREADS)) % NUM_THREADS;
}

// Fill the pad region with 'a'; unpadResult() subtracts these again later.
void padData(uint8_t *buf, size_t bytesRead, size_t numPadBytes) {
    for (size_t i = 0; i < numPadBytes; i++) {
        buf[bytesRead + i] = 'a';
    }
}

// Read a whole file into a malloc'd buffer padded to a NUM_THREADS multiple.
// Aborts with a message on open/alloc/read failure (the original ignored all
// of these and would crash later).
uint8_t * readFile(const char *filename, size_t *outBytesRead, size_t *paddingBytes) {
    FILE *handle = fopen(filename, "rb");
    if (handle == NULL) {
        fprintf(stderr, "Failed to open %s\n", filename);
        exit(EXIT_FAILURE);
    }
    fseek(handle, 0, SEEK_END);
    *outBytesRead = (size_t)ftell(handle);
    *paddingBytes = calculatePadding(*outBytesRead);
    rewind(handle);
    uint8_t *buf = (uint8_t *)malloc(*outBytesRead + *paddingBytes);
    if (buf == NULL) {
        fprintf(stderr, "Failed to allocate %zu bytes\n", *outBytesRead + *paddingBytes);
        fclose(handle);
        exit(EXIT_FAILURE);
    }
    if (*outBytesRead > 0 && fread(buf, 1, *outBytesRead, handle) != *outBytesRead) {
        fprintf(stderr, "Short read on %s\n", filename);
        fclose(handle);
        exit(EXIT_FAILURE);
    }
    fclose(handle);
    padData(buf, *outBytesRead, *paddingBytes);
    return buf;
}

// Shift amount ('a') kept in constant memory; set via cudaMemcpyToSymbol.
__device__ __constant__ int shiftAmount;

// Shift ASCII letters so 'a'->0 ... 'z'->25. One thread per byte; the launch
// must cover exactly dataSize threads (textChunkSize blocks x NUM_THREADS).
__global__ void shiftLetters(uint8_t *data) {
    uint32_t threadId = blockIdx.x * blockDim.x + threadIdx.x;
    data[threadId] = data[threadId] - shiftAmount;
}

// Zero this thread's private NUM_CHARS-wide tally row.
__device__ void zeroLetterCounts(uint32_t *letterCounts) {
    for (size_t i = 0; i < NUM_CHARS; i++) {
        letterCounts[(threadIdx.x * NUM_CHARS) + i] = 0;
    }
}

// Tally letters: each thread counts its own chunk into a private row of
// threadLetterCounts, then the first NUM_CHARS threads each reduce one letter
// across all rows into letterCounts. Must run as a single block of
// NUM_THREADS threads; letterCounts must arrive zeroed.
__device__ void countLetters(uint8_t *data, uint32_t *letterCounts, uint32_t *threadLetterCounts, size_t chunkSize) {
    zeroLetterCounts(threadLetterCounts);
    __syncthreads();
    // Tally letters for each thread
    for (size_t i = 0; i < chunkSize; i++) {
        threadLetterCounts[(threadIdx.x * NUM_CHARS) + data[(threadIdx.x * chunkSize) + i]]++;
    }
    __syncthreads();
    // Total the per-thread tallies
    if (threadIdx.x < NUM_CHARS) {
        for (size_t i = 0; i < NUM_THREADS; i++) {
            letterCounts[threadIdx.x] += threadLetterCounts[threadIdx.x + (i * NUM_CHARS)];
        }
    }
}

// Shared-memory variant: per-thread tallies live in shared memory.
__global__ void countLettersShared(uint8_t *data, uint32_t *letterCounts, size_t chunkSize) {
    __shared__ uint32_t sharedLetterCounts[SHARED_MEM_SIZE];
    countLetters(data, letterCounts, sharedLetterCounts, chunkSize);
}

// Global-memory variant: per-thread tallies live in global memory.
__global__ void countLettersGlobal(uint8_t *data, uint32_t *letterCounts, uint32_t * threadLetterCounts, size_t chunkSize) {
    countLetters(data, letterCounts, threadLetterCounts, chunkSize);
}

// Remove the pad bytes (all 'a') from the final counts.
void unpadResult(uint32_t * letterCounts, size_t paddingBytes) {
    letterCounts[0] -= paddingBytes;
}

// Count letters on the GPU with shared-memory tallies; returns elapsed ns.
uint64_t countWithGPUShared(uint8_t * data, size_t dataSize, uint32_t * letterCounts, size_t textChunkSize) {
    uint8_t *gpuData;
    uint32_t *gpuLetterCounts;
    cudaMalloc((void **)&gpuData, dataSize);
    cudaMemcpy(gpuData, data, dataSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&gpuLetterCounts, NUM_CHARS * sizeof(uint32_t));
    cudaMemcpy(gpuLetterCounts, letterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyHostToDevice);

    auto start = std::chrono::high_resolution_clock::now();
    shiftLetters<<<textChunkSize, NUM_THREADS>>>(gpuData);
    countLettersShared<<<1, NUM_THREADS>>>(gpuData, gpuLetterCounts, textChunkSize);
    // Kernel launches are asynchronous: without this sync the original
    // measured only the launch overhead, not the kernels.
    cudaDeviceSynchronize();
    auto stop = std::chrono::high_resolution_clock::now();

    cudaMemcpy(letterCounts, gpuLetterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    // Free the arrays on the GPU as now we're done with them
    cudaFree(gpuData);
    cudaFree(gpuLetterCounts);
    return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}

// Count letters on the GPU with global-memory tallies; returns elapsed ns.
uint64_t countWithGPUGlobal(uint8_t * data, size_t dataSize, uint32_t * letterCounts, size_t textChunkSize) {
    uint8_t *gpuData;
    uint32_t *gpuLetterCounts;
    uint32_t *threadLetterCounts;
    cudaMalloc((void **)&gpuData, dataSize);
    cudaMemcpy(gpuData, data, dataSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&gpuLetterCounts, NUM_CHARS * sizeof(uint32_t));
    cudaMemcpy(gpuLetterCounts, letterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyHostToDevice);
    // BUG FIX: the original allocated SHARED_MEM_SIZE *bytes* here -- a
    // quarter of the SHARED_MEM_SIZE uint32_t slots the kernel writes.
    cudaMalloc((void **)&threadLetterCounts, SHARED_MEM_SIZE * sizeof(uint32_t));

    auto start = std::chrono::high_resolution_clock::now();
    shiftLetters<<<textChunkSize, NUM_THREADS>>>(gpuData);
    countLettersGlobal<<<1, NUM_THREADS>>>(gpuData, gpuLetterCounts, threadLetterCounts, textChunkSize);
    cudaDeviceSynchronize();  // see countWithGPUShared
    auto stop = std::chrono::high_resolution_clock::now();

    cudaMemcpy(letterCounts, gpuLetterCounts, NUM_CHARS * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    /* Free the arrays on the GPU as now we're done with them */
    cudaFree(gpuData);
    cudaFree(gpuLetterCounts);
    cudaFree(threadLetterCounts);  // was leaked in the original
    return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}

// Use the CPU to count the occurrences of each letter in *data; returns ns.
uint64_t countWithCPU(uint8_t * data, size_t dataSize, uint32_t * letterCounts, int ascii_a) {
    auto start = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < dataSize; i++) {
        letterCounts[data[i] - ascii_a]++;
    }
    auto stop = std::chrono::high_resolution_clock::now();
    return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}

// Display letter counts
void displayResults(uint32_t * letterCounts) {
    printf("\n\n");
    for (size_t i = 0; i < NUM_CHARS; i++) {
        // %u matches uint32_t (the original passed it to %d)
        printf("Found %u %c's\n", letterCounts[i], (int)(i + 0x61));
    }
    printf("\n\n");
}

// Display and analyze the run times (shared vs. global vs. CPU)
void displayTimingResults(uint64_t gpuSharedDuration, uint64_t gpuGlobalDuration, uint64_t cpuDuration) {
    // %llu + cast: %d on a 64-bit argument is undefined behavior
    printf("Took %lluns to run processing on GPU with shared memory\n", (unsigned long long)gpuSharedDuration);
    printf("Took %lluns to run processing on GPU with global memory\n", (unsigned long long)gpuGlobalDuration);
    printf("Took %lluns to run on CPU\n", (unsigned long long)cpuDuration);
    printf("\n");
    printf("Shared Memory runs %fx faster than global memory\n", ((double)gpuGlobalDuration) / gpuSharedDuration);
    printf("Shared Memory on GPU runs %fx faster than the CPU\n", ((double)cpuDuration) / gpuSharedDuration);
    printf("\n");
}

int main(int argc, char* argv[]) {
    // Read command line args
    std::string fileName = "all_letter.shakespeare.txt";
    if (argc > 1) {
        fileName = argv[1];
    }

    // Copy the shift ('a') into constant memory. The symbol is an int, so we
    // copy sizeof(int) bytes -- the original copied only sizeof(uint8_t),
    // leaving the upper three bytes of shiftAmount unset.
    const int ascii_a = 0x61;
    cudaMemcpyToSymbol(shiftAmount, &ascii_a, sizeof(int));

    // Declare some variables
    uint32_t letterCounts[NUM_CHARS];
    size_t bytesRead;
    size_t paddingBytes;

    // Read file
    uint8_t * data = readFile(fileName.c_str(), &bytesRead, &paddingBytes);

    // Calculate run-time parameters
    size_t dataSize = bytesRead + paddingBytes;
    size_t textChunkSize = dataSize / NUM_THREADS;
    printf("Bytes read: %zu\n", bytesRead);        // %zu matches size_t
    printf("Padding bytes: %zu\n", paddingBytes);

    // Pinned staging buffer for faster host<->device copies.
    uint8_t *pinnedData;
    cudaMallocHost((void**)&pinnedData, dataSize);
    memcpy(pinnedData, data, dataSize);

    // Run letter counter on the CPU
    memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
    uint64_t cpuDuration = countWithCPU(pinnedData, dataSize, letterCounts, ascii_a);

    // Run letter counter on the GPU with global memory
    memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
    uint64_t gpuGlobalDuration = countWithGPUGlobal(pinnedData, dataSize, letterCounts, textChunkSize);

    // Run letter counter on the GPU with shared memory
    memset(letterCounts, 0, NUM_CHARS * sizeof(uint32_t));
    uint64_t gpuSharedDuration = countWithGPUShared(pinnedData, dataSize, letterCounts, textChunkSize);
    unpadResult(letterCounts, paddingBytes);

    // Display letter counts and timing
    displayResults(letterCounts);
    displayTimingResults(gpuSharedDuration, gpuGlobalDuration, cpuDuration);

    // Free host buffers (leaked in the original).
    cudaFreeHost(pinnedData);
    free(data);
    return EXIT_SUCCESS;
}
11,902
/**
 * Nearest neighbor search
 * Given a map where zones (shops, factories, ...) are placed with some
 * probability, compute the Manhattan distance from each residential zone to
 * the nearest shop / factory.
 *
 * Distances are propagated outward from each shop / factory recursively, so
 * the cost stays O(N); parallelizing on the GPU shortens it further.
 *
 * Can shared memory be used to speed this up?
 */
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <time.h>

#define CITY_SIZE 10 //200
#define GPU_BLOCK_SIZE 10//40
#define GPU_NUM_THREADS 8 //96
#define GPU_BLOCK_SCALE (1.0)//(1.1)
#define NUM_FEATURES 1
#define QUEUE_MAX 1999
#define MAX_DIST 99
#define HISTORY_SIZE 10000

// Abort with file/line info when a CUDA call fails.
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
    printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
    printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
    exit(EXIT_FAILURE);}}

struct ZoneType {
    int type;
    int level;
};

// CITY_SIZE x CITY_SIZE grid of zone types.
struct ZoningPlan {
    ZoneType zones[CITY_SIZE][CITY_SIZE];
};

// Per-cell, per-feature distance to the nearest cell carrying that feature.
struct DistanceMap {
    int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES];
};

struct Point2D {
    int x;
    int y;
    __host__ __device__ Point2D() : x(0), y(0) {}
    __host__ __device__ Point2D(int x, int y) : x(x), y(y) {}
};

// Linear-congruential PRNG usable from host and device.
// NOTE(review): this shadows the C library rand() by signature overload.
__host__ __device__ unsigned int rand(unsigned int* randx) {
    *randx = *randx * 1103515245 + 12345;
    return (*randx)&2147483647;
}

// Uniform float in [0, 1).
__host__ __device__ float randf(unsigned int* randx) {
    return rand(randx) / (float(2147483647) + 1);
}

// Uniform float in [a, b).
__host__ __device__ float randf(unsigned int* randx, float a, float b) {
    return randf(randx) * (b - a) + a;
}

// Sample an index from a cumulative distribution (linear scan).
__host__ __device__ int sampleFromCdf(unsigned int* randx, float* cdf, int num) {
    float rnd = randf(randx, 0, cdf[num-1]);
    for (int i = 0; i < num; ++i) {
        if (rnd <= cdf[i]) return i;
    }
    return num - 1;
}

// Sample an index from an (unnormalized) pdf; negative weights are treated
// as zero. NOTE(review): assumes num <= 40 (fixed-size local cdf buffer).
__host__ __device__ int sampleFromPdf(unsigned int* randx, float* pdf, int num) {
    if (num == 0) return 0;
    float cdf[40];
    cdf[0] = pdf[0];
    for (int i = 1; i < num; ++i) {
        if (pdf[i] >= 0) {
            cdf[i] = cdf[i - 1] + pdf[i];
        } else {
            cdf[i] = cdf[i - 1];
        }
    }
    return sampleFromCdf(randx, cdf, num);
}

/**
 * Generate a zoning plan: each cell draws a type from the remaining quota of
 * each zone type (quota = CITY_SIZE^2 * requested fraction, depleted as cells
 * are assigned). Deterministic: the PRNG seed is fixed at 0.
 */
__host__ void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution) {
    std::vector<float> numRemainings(NUM_FEATURES + 1);
    for (int i = 0; i < NUM_FEATURES + 1; ++i) {
        numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i];
    }
    unsigned int randx = 0;
    for (int r = 0; r < CITY_SIZE; ++r) {
        for (int c = 0; c < CITY_SIZE; ++c) {
            int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size());
            zoningPlan.zones[r][c].type = type;
            numRemainings[type] -= 1;
        }
    }
}

/**
 * Compute the distance to the nearest store (multithreaded version, using
 * shared memory). One block per GPU_BLOCK_SIZE x GPU_BLOCK_SIZE tile, scaled
 * by GPU_BLOCK_SCALE for halo overlap; a shared-memory work queue drives a
 * BFS-style relaxation. devQueue/devQueueStart/devQueueEnd/devHistory are
 * debug outputs.
 * NOTE(review): this kernel is visibly mid-debugging (see the fixed 7000-iter
 * loop replacing the commented-out queue-drain condition, and the author's
 * own comments below); treat its behavior as provisional.
 */
__global__ void computeDistanceToStore(ZoningPlan* zoningPlan, DistanceMap* distanceMap, uint3* devQueue, int* devQueueStart, int* devQueueEnd, uint3* devHistory) {
    __shared__ int sDist[(int)(GPU_BLOCK_SIZE * GPU_BLOCK_SCALE)][(int)(GPU_BLOCK_SIZE * GPU_BLOCK_SCALE)][NUM_FEATURES];
    __shared__ uint3 sQueue[QUEUE_MAX + 1];
    __shared__ unsigned int queue_begin;
    __shared__ unsigned int queue_end;
    __shared__ unsigned int history_index;
    // NOTE(review): every thread writes these shared scalars (benign: same value).
    queue_begin = 0;
    queue_end = 0;
    history_index = 0;
    __syncthreads();

    // initialize the queue with out-of-range sentinel entries
    for (int i = threadIdx.x; i < QUEUE_MAX + 1; i += GPU_NUM_THREADS) {
        sQueue[i] = make_uint3(CITY_SIZE * 100, CITY_SIZE * 100, NUM_FEATURES);
    }

    // copy from global memory to shared memory
    int num_strides = (GPU_BLOCK_SIZE * GPU_BLOCK_SIZE * GPU_BLOCK_SCALE * GPU_BLOCK_SCALE + GPU_NUM_THREADS - 1) / GPU_NUM_THREADS;
    int r0 = blockIdx.y * GPU_BLOCK_SIZE - GPU_BLOCK_SIZE * (GPU_BLOCK_SCALE - 1) * 0.5;
    int c0 = blockIdx.x * GPU_BLOCK_SIZE - GPU_BLOCK_SIZE * (GPU_BLOCK_SCALE - 1) * 0.5;
    for (int i = 0; i < num_strides; ++i) {
        int r1 = (i * GPU_NUM_THREADS + threadIdx.x) / (int)(GPU_BLOCK_SIZE * GPU_BLOCK_SCALE);
        int c1 = (i * GPU_NUM_THREADS + threadIdx.x) % (int)(GPU_BLOCK_SIZE * GPU_BLOCK_SCALE);
        // bounds guard ("this had been forgotten!!" -- original author)
        if (r1 >= GPU_BLOCK_SIZE * GPU_BLOCK_SCALE || c1 >= GPU_BLOCK_SIZE * GPU_BLOCK_SCALE) continue;
        int type = 0;
        if (r0 + r1 >= 0 && r0 + r1 < CITY_SIZE && c0 + c1 >= 0 && c0 + c1 < CITY_SIZE) {
            type = zoningPlan->zones[r0 + r1][c0 + c1].type;
        }
        // NOTE(review): this global write is not guarded by the in-city check
        // above, so out-of-city (r0+r1, c0+c1) indices write out of bounds of
        // distances[][][] whenever GPU_BLOCK_SCALE > 1 -- confirm intent.
        for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) {
            distanceMap->distances[r0 + r1][c0 + c1][feature_id] = MAX_DIST;
            if (type - 1 == feature_id) {
                sDist[r1][c1][feature_id] = 0;
                unsigned int q_index = atomicInc(&queue_end, QUEUE_MAX);
                sQueue[q_index] = make_uint3(c1, r1, feature_id);
            } else {
                sDist[r1][c1][feature_id] = MAX_DIST;
            }
        }
    }
    __syncthreads();

    // build the distance map
    unsigned int q_index;
    //while ((q_index = atomicInc(&queue_begin, QUEUE_MAX)) < queue_end) {
    // NOTE(review): fixed iteration count stands in for a proper
    // queue-empty termination test (debugging workaround).
    for (int iter = 0; iter < 7000; ++iter) {
        q_index = atomicInc(&queue_begin, QUEUE_MAX);
        //while (queue_begin < queue_end) {
        uint3 pt = sQueue[q_index];
        // NOTE(review): pt.x/pt.y are unsigned, so the "< 0" tests can never
        // fire; only the sentinel's large coordinates trigger the break.
        if (pt.x < 0 || pt.x >= GPU_BLOCK_SIZE * GPU_BLOCK_SCALE || pt.y < 0 || pt.y >= GPU_BLOCK_SIZE * GPU_BLOCK_SCALE) {
            break;
        }
        int d = sDist[pt.y][pt.x][pt.z];
        int count = 0;
        // Relax the four 4-neighbors; any cell whose distance improves is
        // re-enqueued. NOTE(review): count is incremented at most 4 times per
        // iteration, so the "count == 60" history writes below are dead code
        // -- presumably disabled deliberately.
        if (pt.y > 0) {
            unsigned int old = atomicMin(&sDist[pt.y-1][pt.x][pt.z], d + 1);
            if (old > d + 1) {
                unsigned int q_index2 = atomicInc(&queue_end, QUEUE_MAX);
                sQueue[q_index2] = make_uint3(pt.x, pt.y-1, pt.z);
                count++;
                if (count == 60) {
                    unsigned int h_ind = atomicInc(&history_index, HISTORY_SIZE - 1);
                    //devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x + (pt.y-1) * CITY_SIZE, pt.z);
                    devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x + (pt.y-1) * CITY_SIZE, q_index2);
                }
            }
        }
        if (pt.y < CITY_SIZE - 1) {
            unsigned int old = atomicMin(&sDist[pt.y+1][pt.x][pt.z], d + 1);
            if (old > d + 1) {
                unsigned int q_index2 = atomicInc(&queue_end, QUEUE_MAX);
                sQueue[q_index2] = make_uint3(pt.x, pt.y+1, pt.z);
                count++;
                if (count == 60) {
                    unsigned int h_ind = atomicInc(&history_index, HISTORY_SIZE - 1);
                    //devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x + (pt.y+1) * CITY_SIZE, pt.z);
                    devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x + (pt.y+1) * CITY_SIZE, q_index2);
                }
            }
        }
        if (pt.x > 0) {
            unsigned int old = atomicMin(&sDist[pt.y][pt.x-1][pt.z], d + 1);
            if (old > d + 1) {
                unsigned int q_index2 = atomicInc(&queue_end, QUEUE_MAX);
                sQueue[q_index2] = make_uint3(pt.x-1, pt.y, pt.z);
                count++;
                if (count == 60) {
                    unsigned int h_ind = atomicInc(&history_index, HISTORY_SIZE - 1);
                    //devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x-1 + pt.y * CITY_SIZE, pt.z);
                    devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x-1 + pt.y * CITY_SIZE, q_index2);
                }
            }
        }
        if (pt.x < CITY_SIZE - 1) {
            unsigned int old = atomicMin(&sDist[pt.y][pt.x+1][pt.z], d + 1);
            if (old > d + 1) {
                unsigned int q_index2 = atomicInc(&queue_end, QUEUE_MAX);
                sQueue[q_index2] = make_uint3(pt.x+1, pt.y, pt.z);
                count++;
                if (count == 60) {
                    unsigned int h_ind = atomicInc(&history_index, HISTORY_SIZE - 1);
                    //devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x+1 + pt.y * CITY_SIZE, pt.z);
                    devHistory[h_ind] = make_uint3(pt.x + pt.y * CITY_SIZE, pt.x+1 + pt.y * CITY_SIZE, q_index2);
                }
            }
        }
        // reset the consumed slot to the sentinel
        sQueue[q_index] = make_uint3(CITY_SIZE * 100, CITY_SIZE * 100, NUM_FEATURES);
    }
    __syncthreads();

    // copy back to the global distance map (atomicMin merges halo overlap
    // between neighboring blocks)
    for (int i = 0; i < num_strides; ++i) {
        int r1 = (i * GPU_NUM_THREADS + threadIdx.x) / (int)(GPU_BLOCK_SIZE * GPU_BLOCK_SCALE);
        int c1 = (i * GPU_NUM_THREADS + threadIdx.x) % (int)(GPU_BLOCK_SIZE * GPU_BLOCK_SCALE);
        // bounds guard ("this had been forgotten!!" -- original author)
        if (r1 >= GPU_BLOCK_SIZE * GPU_BLOCK_SCALE || c1 >= GPU_BLOCK_SIZE * GPU_BLOCK_SCALE) continue;
        if (r0 + r1 >= 0 && r0 + r1 < CITY_SIZE && c0 + c1 >= 0 && c0 + c1 < CITY_SIZE) {
            for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) {
                atomicMin(&distanceMap->distances[r0 + r1][c0 + c1][feature_id], sDist[r1][c1][feature_id]);
            }
        }
    }

    // for debugging: copy the queue state out to global memory
    // NOTE(review): every thread runs this loop and the writes below (benign
    // duplicate writes; racy if multiple blocks run).
    for (int i = 0; i < queue_end; ++i) {
        devQueue[i] = sQueue[i];
    }
    *devQueueStart = queue_begin;
    *devQueueEnd = queue_end;
}

/**
 * Compute the distance to the nearest store (CPU reference version):
 * plain BFS relaxation over an std::list queue.
 */
__host__ void computeDistanceToStoreCPU(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
    std::list<int3> queue;
    // seed the queue with every cell that carries the feature
    for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) {
        for (int cell_id = 0; cell_id < CITY_SIZE * CITY_SIZE; ++cell_id) {
            int r = cell_id / CITY_SIZE;
            int c = cell_id % CITY_SIZE;
            if (zoningPLan->zones[r][c].type - 1 == feature_id) {
                queue.push_back(make_int3(c, r, feature_id));
                distanceMap->distances[r][c][feature_id] = 0;
            } else {
                distanceMap->distances[r][c][feature_id] = MAX_DIST;
            }
        }
    }
    // relax until no cell improves
    while (!queue.empty()) {
        int3 pt = queue.front();
        queue.pop_front();
        int d = distanceMap->distances[pt.y][pt.x][pt.z];
        if (pt.y > 0) {
            if (distanceMap->distances[pt.y-1][pt.x][pt.z] > d + 1) {
                distanceMap->distances[pt.y-1][pt.x][pt.z] = d + 1;
                queue.push_back(make_int3(pt.x, pt.y-1, pt.z));
            }
        }
        if (pt.y < CITY_SIZE - 1) {
            if (distanceMap->distances[pt.y+1][pt.x][pt.z] > d + 1) {
                distanceMap->distances[pt.y+1][pt.x][pt.z] = d + 1;
                queue.push_back(make_int3(pt.x, pt.y+1, pt.z));
            }
        }
        if (pt.x > 0) {
            if (distanceMap->distances[pt.y][pt.x-1][pt.z] > d + 1) {
                distanceMap->distances[pt.y][pt.x-1][pt.z] = d + 1;
                queue.push_back(make_int3(pt.x-1, pt.y, pt.z));
            }
        }
        if (pt.x < CITY_SIZE - 1) {
            if (distanceMap->distances[pt.y][pt.x+1][pt.z] > d + 1) {
                distanceMap->distances[pt.y][pt.x+1][pt.z] = d + 1;
                queue.push_back(make_int3(pt.x+1, pt.y, pt.z));
            }
        }
    }
}

/**
 * For debugging: print the zoning plan (row CITY_SIZE-1 first).
 */
__host__ void showDevZoningPlan(ZoningPlan* zoningPlan) {
    ZoningPlan plan;
    CUDA_CALL(cudaMemcpy(&plan, zoningPlan, sizeof(ZoningPlan), cudaMemcpyDeviceToHost));
    printf("Zone plan:\n");
    for (int r = CITY_SIZE - 1; r >= 0; --r) {
        for (int c = 0; c < CITY_SIZE; ++c) {
            printf("%2d, ", plan.zones[r][c].type);
        }
        printf("\n");
    }
    printf("\n");
}

/**
 * For debugging: print the distance map for one feature.
 */
__host__ void showDevDistMap(DistanceMap* distMap, int feature_id) {
    DistanceMap map;
    CUDA_CALL(cudaMemcpy(&map, distMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost));
    printf("Distance map:\n");
    for (int r = CITY_SIZE - 1; r >= 0; --r) {
        for (int c = 0; c < CITY_SIZE; ++c) {
            printf("%2d, ", map.distances[r][c][feature_id]);
        }
        printf("\n");
    }
    printf("\n");
}

/**
 * For debugging: print the queue contents for one feature
 * (handles the circular-buffer wraparound when end < begin).
 */
__host__ void showDevQueue(uint3* queue, int* queueBegin, int* queueEnd, int featureId) {
    uint3 q[QUEUE_MAX + 1];
    int begin;
    int end;
    CUDA_CALL(cudaMemcpy(q, queue, sizeof(uint3) * (QUEUE_MAX + 1), cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaMemcpy(&begin, queueBegin, sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaMemcpy(&end, queueEnd, sizeof(int), cudaMemcpyDeviceToHost));
    printf("Queue:\n");
    printf("Begin: %d, End: %d, featureId: %d\n", begin, end, featureId);
    int num = end - begin + 1;
    if (num < 0) {
        num = end + QUEUE_MAX + 1 - begin + 1;
    }
    for (int i = 0; i < num; ++i) {
        if (q[(begin + i) % (QUEUE_MAX + 1)].z == featureId) {
            printf("%3d: %3d, %3d\n", (begin + i) % (QUEUE_MAX + 1), q[(begin + i) % (QUEUE_MAX + 1)].x, q[(begin + i) % (QUEUE_MAX + 1)].y);
        }
    }
    printf("\n");
}

// For debugging: print the first 100 recorded wavefront transitions.
__host__ void showDevHistory(uint3* devHistory) {
    uint3 history[HISTORY_SIZE];
    CUDA_CALL(cudaMemcpy(history, devHistory, sizeof(uint3) * HISTORY_SIZE, cudaMemcpyDeviceToHost));
    printf("History:\n");
    for (int i = 0; i < 100; ++i) {
        int x0 = history[i].x % CITY_SIZE;
        int y0 = history[i].x / CITY_SIZE;
        int x1 = history[i].y % CITY_SIZE;
        int y1 = history[i].y / CITY_SIZE;
        int featureId = history[i].z;
        printf("%2d: (%3d, %3d) -> (%3d, %3d) (%d)\n", i, x0, y0, x1, y1, featureId);
    }
    printf("\n");
}

int main() {
    time_t start, end;

    ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan));
    DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap));
    DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap));

    // initialize the distances
    //memset(hostDistanceMap, MAX_DIST, sizeof(DistanceMap));
    //memset(hostDistanceMap2, MAX_DIST, sizeof(DistanceMap));

    std::vector<float> zoneTypeDistribution(6);
    zoneTypeDistribution[0] = 0.5f;
    zoneTypeDistribution[1] = 0.2f;
    zoneTypeDistribution[2] = 0.1f;
    zoneTypeDistribution[3] = 0.1f;
    zoneTypeDistribution[4] = 0.05f;
    zoneTypeDistribution[5] = 0.05f;

    // generate the initial plan
    start = clock();
    generateZoningPlan(*hostZoningPlan, zoneTypeDistribution);
    end = clock();
    printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);

    // copy the initial plan into a device buffer
    ZoningPlan* devZoningPlan;
    CUDA_CALL(cudaMalloc((void**)&devZoningPlan, sizeof(ZoningPlan)));
    CUDA_CALL(cudaMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), cudaMemcpyHostToDevice));

    // for debugging, show the initial plan
    if (CITY_SIZE <= 100) {
        showDevZoningPlan(devZoningPlan);
    }

    // device buffer for the distance map
    DistanceMap* devDistanceMap;
    CUDA_CALL(cudaMalloc((void**)&devDistanceMap, sizeof(DistanceMap)));

    ///////////////////////////////////////////////////////////////////////
    // compute the distance to the nearest store on the CPU
    /*
    start = clock();
    for (int iter = 0; iter < 1000; ++iter) {
        computeDistanceToStoreCPU(hostZoningPlan, hostDistanceMap2);
    }
    end = clock();
    printf("computeDistanceToStore CPU: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
    */

    int* devQueueStart;
    CUDA_CALL(cudaMalloc((void**)&devQueueStart, sizeof(int)));
    int* devQueueEnd;
    CUDA_CALL(cudaMalloc((void**)&devQueueEnd, sizeof(int)));
    printf("start...\n");

    // for debugging, allocate queue memory in global memory
    uint3* devQueue;
    CUDA_CALL(cudaMalloc((void**)&devQueue, sizeof(uint3) * (QUEUE_MAX + 1)));

    // for debugging, allocate wavefront-history memory in global memory
    uint3* devHistory;
    CUDA_CALL(cudaMalloc((void**)&devHistory, sizeof(uint3) * HISTORY_SIZE));

    /*
    ///////////////////////////////////////////////////////////////////////
    // warmp up
    computeDistanceToStore<<<dim3(CITY_SIZE / GPU_BLOCK_SIZE, CITY_SIZE / GPU_BLOCK_SIZE), GPU_NUM_THREADS>>>(devZoningPlan, devDistanceMap, devQueue);

    // compute the distance to the nearest store with multiple threads
    start = clock();
    for (int iter = 0; iter < 1000; ++iter) {
        computeDistanceToStore<<<dim3(CITY_SIZE / GPU_BLOCK_SIZE, CITY_SIZE / GPU_BLOCK_SIZE), GPU_NUM_THREADS>>>(devZoningPlan, devDistanceMap, devQueue);
        cudaDeviceSynchronize();
    }
    end = clock();
    printf("computeDistanceToStore GPU: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
    */

    // temporary, for debugging -- working around a bug:
    // overwrite the generated plan with a fixed one from disk.
    // NOTE(review): fp is not NULL-checked; a missing zone.txt crashes here.
    FILE* fp = fopen("zone.txt", "r");
    for (int r = 0; r < CITY_SIZE; ++r) {
        for (int c = 0; c < CITY_SIZE; ++c) {
            fscanf(fp, "%d,", &hostZoningPlan->zones[r][c].type);
        }
    }
    CUDA_CALL(cudaMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), cudaMemcpyHostToDevice));
    showDevZoningPlan(devZoningPlan);

    computeDistanceToStore<<<dim3(CITY_SIZE / GPU_BLOCK_SIZE, CITY_SIZE / GPU_BLOCK_SIZE), GPU_NUM_THREADS>>>(devZoningPlan, devDistanceMap, devQueue, devQueueStart, devQueueEnd, devHistory);
    cudaDeviceSynchronize();

    // show the distance map
    showDevDistMap(devDistanceMap, 0);

    // show the queue
    showDevQueue(devQueue, devQueueStart, devQueueEnd, 0);

    // show the history
    //showDevHistory(devHistory);

    /*
    // compare the results with the CPU version of the exact algrotihm
    int bad_k = 0;
    int bad_count = 0;
    {
        for (int r = CITY_SIZE - 1; r >= 0; --r) {
            for (int c = 0; c < CITY_SIZE; ++c) {
                for (int k = 0; k < NUM_FEATURES; ++k) {
                    if (hostDistanceMap->distances[r][c][k] != hostDistanceMap2->distances[r][c][k]) {
                        if (bad_count == 0) {
                            printf("ERROR! %d,%d k=%d, %d != %d\n", r, c, k, hostDistanceMap->distances[r][c][k], hostDistanceMap2->distances[r][c][k]);
                            bad_k = k;
                        }
                        bad_count++;
                    }
                }
            }
        }
    }

    // for debug
    if (CITY_SIZE <= 200 && bad_count > 0) {
        for (int r = CITY_SIZE - 1; r >= 0; --r) {
            for (int c = 0; c < CITY_SIZE; ++c) {
                printf("%d, ", hostDistanceMap->distances[r][c][bad_k]);
            }
            printf("\n");
        }
        printf("\n");
        for (int r = CITY_SIZE - 1; r >= 0; --r) {
            for (int c = 0; c < CITY_SIZE; ++c) {
                printf("%d, ", hostDistanceMap2->distances[r][c][bad_k]);
            }
            printf("\n");
        }
        printf("\n");
    }
    printf("Total error: %d\n", bad_count);
    */

    // release device buffer
    cudaFree(devZoningPlan);
    cudaFree(devDistanceMap);

    // release host buffer
    free(hostZoningPlan);
    free(hostDistanceMap);
    free(hostDistanceMap2);

    //cudaDeviceReset();
}
11,903
#include "includes.h"

// Second phase of a scan: fold the per-block sums back into the scan output.
// Block 0 is already correct; every later block b adds add[b - 1] to its two
// BLOCK_SIZE-wide segments, one element per thread per segment.
__global__ void post_scan(float* in, float* add, int len)
{
    if (blockIdx.x == 0) return;            // first block needs no correction

    const unsigned int tid  = threadIdx.x;
    const unsigned int base = 2 * blockIdx.x * BLOCK_SIZE;
    const unsigned int lo   = base + tid;               // first segment slot
    const unsigned int hi   = base + BLOCK_SIZE + tid;  // second segment slot

    if (lo < len) in[lo] += add[blockIdx.x - 1];
    if (hi < len) in[hi] += add[blockIdx.x - 1];
}
11,904
// D.run ["micros/regfile-layout.cu","--arch=sm_75","-o=micros/sm_75/regfile-layout.sass","-lines","--save-ptx=micros/sm_75/regfile-layout.ptx"]
#include <cuda_fp16.h>

// Micro-benchmarks probing register-file layout: one trivial element-wise
// kernel per operand width/type. Each thread handles one element.

// signed 8-bit: load, add 1, store (arithmetic widens to int, store truncates)
extern "C" __global__ void add_s8(const char *A, char *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = (char)(A[gid] + 1);
}

// unsigned 8-bit
extern "C" __global__ void add_u8(const unsigned char *A, unsigned char *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = (unsigned char)(A[gid] + 1);
}

// packed 8-bit x4: component-wise A+B, then +0x88 on every lane
extern "C" __global__ void add_u8x4(const uchar4 *A, const uchar4 *B, uchar4 *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    const uchar4 a = A[gid];
    const uchar4 b = B[gid];
    uchar4 sum  = make_uchar4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
    uchar4 bias = make_uchar4(sum.x + 0x88, sum.y + 0x88, sum.z + 0x88, sum.w + 0x88);
    OUT[gid] = bias;
}

// signed 16-bit
extern "C" __global__ void add_s16(const short *A, short *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = (short)(A[gid] + 1);
}

// unsigned 16-bit
extern "C" __global__ void add_u16(const unsigned short *A, unsigned short *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = (unsigned short)(A[gid] + 1);
}

// packed 16-bit x2: component-wise A+B, then +0x1616 on both lanes
extern "C" __global__ void add_u16x2(const short2 *A, const short2 *B, short2 *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    const short2 a = A[gid];
    const short2 b = B[gid];
    short2 sum  = make_short2(a.x + b.x, a.y + b.y);
    short2 bias = make_short2(sum.x + 0x1616, sum.y + 0x1616);
    OUT[gid] = bias;
}

// scalar half-precision add
extern "C" __global__ void add_f16(const __half *A, const __half *B, __half *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = A[gid] + B[gid];
}

// packed half2 add
extern "C" __global__ void add_f16x2(const __half2 *A, const __half2 *B, __half2 *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = A[gid] + B[gid];
}

// signed 64-bit: add constant 0x33
extern "C" __global__ void add_s64(const long long *A, long long *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = A[gid] + 0x33;
}

// unsigned 64-bit: add constant 0x44
extern "C" __global__ void add_u64(const unsigned long long *A, unsigned long long *OUT)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    OUT[gid] = A[gid] + 0x44;
}
11,905
#include "includes.h"

__constant__ float *c_Kernel;

// Element-wise magnitude selection: for each index X, keep whichever of
// d_ip_v[X] / d_ip_ir[X] has the larger absolute value, writing the winner
// into d_ip_v. One thread per element; extra threads are guarded out by len.
__global__ void compare(float *d_ip_v, float *d_ip_ir, int len)
{
    const int X = blockIdx.x * blockDim.x + threadIdx.x;
    if (X < len) {
        // fabsf: the unqualified abs() used originally can resolve to the
        // integer overload (silently truncating the floats) depending on
        // which headers are in scope; fabsf is unambiguous.
        d_ip_v[X] = (fabsf(d_ip_v[X]) > fabsf(d_ip_ir[X])) ? d_ip_v[X] : d_ip_ir[X];
    }
}
11,906
#include "includes.h"

// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, size).
// One thread per element; the guard handles a grid that overshoots size.
__global__ void vec_add(int* A, int* B, int* C, int size)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= size) return;
    C[gid] = A[gid] + B[gid];
}
11,907
// B=diag(A)
extern "C" {
// Extracts the main diagonal of the lengthA x lengthA float matrix a into b:
// b[i] = a[i + i*lengthA] (the diagonal offset is the same for row- and
// column-major storage). One thread per diagonal element.
__global__ void diag_kernel_32(const int lengthA, const float *a, float *b)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= lengthA) return;
    b[idx] = a[idx + idx * lengthA];
}
}
11,908
// version 0
// global memory only interleaved version
// Each thread starts at its flat global index and strides by the total thread
// count (interleaved access keeps a warp's loads coalesced). Every in-range
// value is tallied with one atomicAdd directly on the global bins; values
// >= num_bins are ignored.
__global__ void histogram_global_kernel(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += stride) {
        unsigned int v = input[i];
        if (v < num_bins) {
            atomicAdd(&bins[v], 1);
        }
    }
}

// version 1
// shared memory privatized version
// Each block keeps a private histogram in dynamic shared memory (launch with
// num_bins * sizeof(unsigned int) bytes of shared memory), so atomic
// contention is confined to the block and served at shared-memory speed.
// After a barrier, the private copy is merged into the global bins with one
// global atomicAdd per bin per block.
__global__ void histogram_shared_kernel(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins) {
    extern __shared__ unsigned int private_bins[];

    // cooperatively zero the private histogram
    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x) {
        private_bins[b] = 0;
    }
    __syncthreads();

    // interleaved tally into shared memory
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += stride) {
        unsigned int v = input[i];
        if (v < num_bins) {
            atomicAdd(&private_bins[v], 1);
        }
    }
    __syncthreads();

    // flush the block-private histogram to the global one
    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x) {
        atomicAdd(&bins[b], private_bins[b]);
    }
}

// version 2
// your method of optimization using shared memory
// Same privatization as version 1 (launch with num_bins * sizeof(unsigned
// int) bytes of dynamic shared memory), plus per-thread run aggregation:
// real inputs (e.g. images) often contain runs of identical values, so each
// thread accumulates the length of the current run of equal values in a
// register and issues a SINGLE shared atomicAdd of the run length when the
// value changes, instead of one atomic per element. The sentinel 0xFFFFFFFF
// marks "no run in progress"; it can never collide with a counted value
// because only v < num_bins values start a run.
__global__ void histogram_shared_accumulate_kernel(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins) {
    extern __shared__ unsigned int private_bins[];

    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x) {
        private_bins[b] = 0;
    }
    __syncthreads();

    const unsigned int stride = blockDim.x * gridDim.x;
    unsigned int run_value = 0xFFFFFFFFu;  // value of the run being accumulated
    unsigned int run_count = 0;            // its length so far

    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += stride) {
        unsigned int v = input[i];
        if (v >= num_bins) continue;       // out-of-range values are ignored
        if (v == run_value) {
            ++run_count;                   // extend the current run, no atomic
        } else {
            if (run_count > 0) {
                atomicAdd(&private_bins[run_value], run_count);
            }
            run_value = v;
            run_count = 1;
        }
    }
    if (run_count > 0) {                   // flush the final run
        atomicAdd(&private_bins[run_value], run_count);
    }
    __syncthreads();

    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x) {
        atomicAdd(&bins[b], private_bins[b]);
    }
}

// clipping function
// resets bins that have value larger than 127 to 127.
// that is if bin[i]>127 then bin[i]=127
__global__ void convert_kernel(unsigned int *bins, unsigned int num_bins) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_bins; i += stride) {
        if (bins[i] > 127) {
            bins[i] = 127;
        }
    }
}
11,909
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Print the multiprocessor count and max resident threads per SM of CUDA
// device 0. Exits non-zero if the query fails (e.g. no CUDA device) -- the
// original ignored the return code and printed an uninitialized struct.
int main(int argc, char *argv[]){
    struct cudaDeviceProp properties;
    cudaError_t err = cudaGetDeviceProperties(&properties, 0);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    printf("using %i multiprocessors\n max threads per processor: %i \n"
           ,properties.multiProcessorCount
           ,properties.maxThreadsPerMultiProcessor);
    return 0;
}
11,910
#include <stdio.h>
#define SIZE 16

// Abort main with a message when a CUDA call fails -- the original ignored
// every return code, so a failed cudaMallocManaged would null-dereference.
#define CUDA_CHECK(call) do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
        fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err_)); \
        return 1; \
    } \
} while (0)

// c[i] = a[i] + b[i] for threads with index < n; out-of-range threads print a
// marker instead. Every thread also echoes its thread/block coordinates
// (debug output; device printf is serialized and slow).
__global__ void VectorAdd(int *a,int *b,int *c,int n)
{
    int i = threadIdx.x;
    if(i < n)c[i] = a[i] + b[i];
    else printf("-----%d-----\n",i);
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int dx = blockDim.x;
    int dy = blockDim.y;
    printf("thread:%d,%d,block:%d,%d,blockdim:%d,%d\n",tx,ty,bx,by,dx,dy);
}

int main()
{
    int *a,*b,*c;
    // unified (managed) memory: accessible from both host and device
    CUDA_CHECK(cudaMallocManaged(&a, SIZE * sizeof(int)));
    CUDA_CHECK(cudaMallocManaged(&b, SIZE * sizeof(int)));
    CUDA_CHECK(cudaMallocManaged(&c, SIZE * sizeof(int)));

    for (int i = 0;i < SIZE;i++)
    {
        a[i] = b[i] = i;
        c[i] = 0;
    }

    VectorAdd <<<1,SIZE>>> (a,b,c,SIZE);
    CUDA_CHECK(cudaGetLastError());        // catch launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());   // wait for the kernel before reading c

    for (int i = 0; i < 10; i++)printf("c[%d] = %d\n",i,c[i]);

    CUDA_CHECK(cudaFree(a));
    CUDA_CHECK(cudaFree(b));
    CUDA_CHECK(cudaFree(c));
    return 0;
}
11,911
#include "includes.h"

// Builds two 256-bin histograms side by side in shared memory: bins [0,256)
// tally the first half of the input (i < n/2), bins [256,512) the second
// half. MUST be launched with exactly 512 threads per block -- each thread
// zeroes and later flushes exactly one shared bin.
__global__ void histogram ( unsigned char *utime, unsigned int* histo, size_t n)
{
    __shared__ unsigned int localBins[512];
    localBins[threadIdx.x] = 0;
    __syncthreads ();

    const int step = blockDim.x*gridDim.x;
    int idx = threadIdx.x + blockIdx.x*blockDim.x;

    // First half of the samples -> lower 256 bins.
    while (idx < n/2) {
        atomicAdd (&localBins[utime[idx]], 1);
        idx += step;
    }
    // idx deliberately carries over from the loop above: each thread's stride
    // sequence continues uninterrupted, so the two loops together cover every
    // index in [tid, n) exactly once across the grid.
    while (idx < n) {
        atomicAdd (&localBins[256 + utime[idx]], 1);
        idx += step;
    }
    __syncthreads ();

    // Global accumulation: one bin per thread (hence the 512-thread requirement).
    atomicAdd (&histo[threadIdx.x], localBins[threadIdx.x]);
}
11,912
//Date 1 April 2019
//Program: To multiply two matrices
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> //for random numbers
#include <time.h>
#include <sys/time.h>

#define gpuErrchk(ans){ gpuAssert((ans),__FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if(code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert : %s %s %d\n", cudaGetErrorString(code), file, line);
        if(abort) exit(code);
    }
}

// Threads per block dimension: 16x16 = 256 threads, always within the
// 1024-threads-per-block limit.
#define TILE_DIM 16

// C = A * B where A is Am x An, B is An x Bn, C is Am x Bn (row-major).
// One thread per output element. Grid/block indexing replaces the original's
// single-block launch, which silently computed nothing for matrices whose
// rows x columns exceeded the per-block thread limit.
__global__ void matrix_multiply(int *A, int *B, int *C, int Am, int An, int Bn)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // row of C
    int j = blockIdx.y * blockDim.y + threadIdx.y; // column of C
    if(i < Am && j < Bn)
    {
        int sum = 0;
        for(int k=0; k<An; ++k)
        {
            sum += A[ i * An + k] * B[k * Bn + j];
        }
        C[i * Bn + j] = sum;
    }
}

int main()
{
    // host function definition
    int get_random();

    //variable definition
    int *hA, *hB, *hC, *hD, *dA, *dB, *dC;
    int rows_A, columns_A, rows_B, columns_B;

    //define size of array
    do{
        printf("The number of columns of first matrix must \n be equal to number of rows of second matrix!!!\n");
        printf("Enter the rows and columns of A\n");
        scanf("%d",&rows_A);
        scanf("%d",&columns_A);
        printf("Enter the rows and columns of B\n");
        scanf("%d",&rows_B);
        scanf("%d",&columns_B);
    }while(columns_A != rows_B);

    // Per-matrix sizes. The original sized every buffer as
    // max(|A|, |B|), but C has rows_A * columns_B elements, which can exceed
    // both (e.g. 10x1 times 1x10 -> 100 elements) -- a heap/device overflow.
    int size_A = sizeof(int) * rows_A * columns_A;
    int size_B = sizeof(int) * rows_B * columns_B;
    int size_C = sizeof(int) * rows_A * columns_B;

    //memory allocation in host
    hA = (int*)malloc(size_A);
    hB = (int*)malloc(size_B);
    hC = (int*)malloc(size_C);
    hD = (int*)malloc(size_C);

    //memory allocation in device
    gpuErrchk(cudaMalloc((void**)&dA,size_A));
    gpuErrchk(cudaMalloc((void**)&dB,size_B));
    gpuErrchk(cudaMalloc((void**)&dC,size_C));

    //array initilization
    for(int i=0; i<rows_A; ++i)
    {
        for(int j=0; j< columns_A; ++j)
        {
            hA[i * columns_A + j] = get_random();
        }
    }
    for(int i=0; i<rows_B; ++i)
    {
        for(int j=0; j< columns_B; ++j)
        {
            hB[i * columns_B + j] = get_random();
        }
    }

    clock_t host_begin, host_end;

    //record begin of host computation
    host_begin = clock();

    //multiply matrix in host
    for(int i=0; i<rows_A; ++i)
    {
        for(int j=0; j< columns_B; ++j)
        {
            int sum = 0;
            for(int k=0; k< columns_A; ++k)
            {
                sum += hA[i * columns_A + k] * hB[k * columns_B + j];
            }
            hC[i * columns_B + j] = sum ;
        }
    }

    //record end of host computation
    host_end = clock();

    clock_t device_begin, device_end;

    //record of device computation
    device_begin = clock();

    //copy host data to memory
    gpuErrchk(cudaMemcpy(dA, hA, size_A, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dB, hB, size_B, cudaMemcpyHostToDevice));

    // multiply matrix in device: one TILE_DIM x TILE_DIM block per output tile
    dim3 block(TILE_DIM, TILE_DIM);
    dim3 grid((rows_A + TILE_DIM - 1) / TILE_DIM,
              (columns_B + TILE_DIM - 1) / TILE_DIM);
    matrix_multiply<<<grid, block>>>(dA, dB, dC, rows_A, columns_A, columns_B );
    gpuErrchk(cudaGetLastError()); // the original never checked the launch

    //copy data from device to host (blocking, so it also synchronizes)
    gpuErrchk(cudaMemcpy(hD, dC, size_C, cudaMemcpyDeviceToHost));

    //record end of device computation
    device_end = clock();

    double host_time, device_time;
    host_time = (double)((double)(host_end - host_begin)/(CLOCKS_PER_SEC));
    device_time = (double)((double)(device_end - device_begin)/(CLOCKS_PER_SEC));

    //print the time of host and device computation
    printf("++++++++++++++++++++++++++++++++++++++++++++++++++++++++");
    printf("\n\t\tHost computation time: %f\n",host_time);
    printf("\t\tDevice computation time: %f\n",device_time);
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");

    //-------------------------------------------------------------------------------
    /*
    // display element of A
    printf("\n\n Matrix A\n\n");
    for(int i=0; i<rows_A; ++i)
    {
        for(int j=0; j< columns_A; ++j)
        {
            printf("%d\t",hA[i * columns_A + j]);
        }
        printf("\n");
    }

    // display element of B
    printf("\n\n Matrix B\n\n");
    for(int i=0; i<rows_B; ++i)
    {
        for(int j=0; j< columns_B; ++j)
        {
            printf("%d\t",hB[i * columns_B + j]);
        }
        printf("\n");
    }

    // display element of AB
    printf("\n\n Matrix AB\n\n");
    for(int i=0; i<rows_A; ++i)
    {
        for(int j=0; j< columns_B; ++j)
        {
            printf("%d\t",hC[i * columns_B + j]);
        }
        printf("\n");
    }
    */

    //display the devation of device and host result
    //--------------------------------------------------------------------------------------------
    int sum = 0;
    for(int i=0; i< rows_A; ++i)
    {
        for(int j=0; j< columns_B; ++j)
        {
            sum += hD[i * columns_B + j] - hC[i * columns_B + j];
        }
    }
    printf("\nThe deviation of host and device result is %d\n",sum);

    //free host memory
    free(hA);
    free(hB);
    free(hC);
    free(hD);

    //free device memory
    gpuErrchk(cudaFree(dA));
    gpuErrchk(cudaFree(dB));
    gpuErrchk(cudaFree(dC));
}

//random number generator
int get_random()
{
    return rand() % 10 + 1;
}

// larger of two ints (kept for interface compatibility; no longer used for
// buffer sizing, which was the overflow source)
int get_max(int a, int b)
{
    return a >= b ? a : b;
}
11,913
#include "includes.h"

/*
 * Example of how to use the mxGPUArray API in a MEX file. This example shows
 * how to write a MEX function that takes a gpuArray input and returns a
 * gpuArray output, e.g. B=mexFunction(A).
 *
 * Copyright 2012 The MathWorks, Inc.
 */

// ceil(m / n) for positive integers
#define DIVUP(m,n) ((m)/(n)+((m)%(n)>0))

// 64 threads per block: one thread per box, one bit per box in the
// unsigned long long suppression mask below.
int const threadsPerBlock = (sizeof(unsigned long long) * 8);

/*
 * Device code
 */

// Intersection-over-union of two boxes stored as [x1, y1, x2, y2, ...].
// The "+ 1" terms treat coordinates as inclusive pixel indices.
__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

// Non-maximum suppression overlap kernel.
// Grid: (DIVUP(n_boxes, 64), DIVUP(n_boxes, 64)) blocks of 64 threads; each
// block compares one 64-row tile against one 64-column tile of boxes.
// dev_boxes: n_boxes * 5 floats per box [x1, y1, x2, y2, score].
// dev_mask:  n_boxes * DIVUP(n_boxes, 64) words; bit i of
//            dev_mask[row * col_blocks + col] is set when box
//            (64*col + i) overlaps box `row` above nms_overlap_thres.
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thres, const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y, col_start = blockIdx.x;
  // clamp tile sizes for the ragged last tiles
  const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock),
            col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
  // NOTE(review): the usual upper-triangle early-out is disabled here, so
  // every (row, col) tile pair is computed — presumably intentional for a
  // symmetric mask; confirm against the consumer of dev_mask.
  //if (row_start > col_start) return;

  // stage the column tile's boxes once per block
  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;   // one bit per column-tile box
    int start = 0;
    // on the diagonal tile, skip self and earlier boxes in the same tile
    if (row_start == col_start) start = threadIdx.x + 1;
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thres) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
11,914
// adaptation of Pavel's imreconstruction code for openCV #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/tuple.h> #include <thrust/functional.h> #include <thrust/tuple.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/copy.h> #include <thrust/unique.h> #include <thrust/sort.h> #include <thrust/count.h> #define MAX_THREADS 256 #define XX_THREADS 4 #define XY_THREADS 32 #define NEQ(a,b) ( (a) != (b) ) #define WARP_SIZE 32 //using namespace cv::gpu; //using namespace cv::gpu::device; namespace nscale { namespace gpu { //////////////////////////////////////////////////////////////////////////////// // RECONSTRUCTION BY DILATION //////////////////////////////////////////////////////////////////////////////// /* * warp = 32. shared memory in banks of 32, each 32 bits (128 bytes wide) - interleave of 4 for rows? no need. compute 2 has no conflict for read/write bytes. * global memory in partitions of 256 bytes. 1 warp at a time at 1, 2, 4, 8, or 16 bytes. width of array and threadblock = warpsize * c, * try to remove syncthreads by making sure warps do not diverge(and use volatile) * thread id = x + y * Dx. so this means if x and y are swapped between mem and compute steps, must have sync... * IF 32 x 8 theads, repeat 4 times in y. read single char from global, then swap x and y to process 32 y at a time, would need to syncthread inside iterations. can use 1 warp to go through all shared mem iteratively, or have each warp compute 4 bytes 4 columns (warps are ordered) * IF 8x4 or 4x8 threads for a warp, read 1 bytes from global (linearize the warp thread id (e.g. x + y*8 or x+y*4) to read from global sequentially, and repeat 4 or 8 times) then process the memory for this warp 4 y or 8 y iteratively, repeat for all x chunks. essentially the original algorithm. then create threadblock that is just multiplied in y to reach 192 or 256. avoids syncthreads completely. 
 * or alternatively, treat each warp as 4x8, each x processing columns 8 apart;
 * each warp then does 4 bytes (8 warps) to generate completed 8x8 blocks - no
 * syncthreads needed. no... would require more kernel iterations.
 * for backward: thread ids should map to the data - so first thread has the
 * last data (for correctness); for y, similar to this...
 * for register usage: use unsigned int where possible. maybe a 1D shared
 * array would be better too...
 */

// Forward (left-to-right) reconstruction-by-dilation sweep along X.
// In-place update: marker[y][x] = min(max(marker[y][x-1], marker[y][x]), mask[y][x]).
// Each block covers XY_THREADS consecutive rows; rows are staged into shared
// memory one WARP_SIZE-wide chunk at a time, scanned serially in x, and
// written back.  Shared column 0 carries the scan value across chunks.
// NOTE(review): assumes blockDim == (XX_THREADS, XY_THREADS) and that
// XX_THREADS divides WARP_SIZE (see the (x, y) re-linearization) — confirm
// against the launch in imreconQueueIntCaller below.
template <typename T>
__global__ void iRec1DForward_X_dilation ( T* marker, const T* mask, const unsigned int sx, const unsigned int sy) {

	const unsigned int x = (threadIdx.x + threadIdx.y * XX_THREADS) % WARP_SIZE;
	const unsigned int y = (threadIdx.x + threadIdx.y * XX_THREADS) / WARP_SIZE;
	const unsigned int ychunk = WARP_SIZE / XX_THREADS;  // rows each (x, y) lane copies
	const unsigned int xstop = sx - WARP_SIZE;           // start of the last full chunk

	// one extra column so [0] can hold the carry-in from the previous chunk
	volatile __shared__ T s_marker[XY_THREADS][WARP_SIZE+1];
	volatile __shared__ T s_mask [XY_THREADS][WARP_SIZE+1];
	T s_old, s_new;
	unsigned int startx;
	unsigned int start;

	s_marker[threadIdx.y][WARP_SIZE] = 0;  // carry-in for the very first chunk

	// the increment allows overlap by 1 between iterations to move the data to next chunk.
	for (startx = 0; startx < xstop; startx += WARP_SIZE) {
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		// carry the previous chunk's last value into column 0
		s_marker[threadIdx.y][0] = s_marker[threadIdx.y][WARP_SIZE];

		// copy part of marker and mask to shared memory, 1 warp at a time
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x+1] = marker[start + i*sx];
			s_mask [y * ychunk+i][x+1] = mask[start + i*sx];
		}

		// serial scan: all X threads of a row do identical operations, so the
		// read/write hazards still yield the same result (deliberately unsynced).
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) {  // requires dimension to be perfectly padded
			for (unsigned int i = 1; i <= WARP_SIZE; ++i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i-1], s_old ), s_mask[threadIdx.y][i] );
				s_marker[threadIdx.y][i] = s_new;
			}
		}

		// write the chunk back to global memory
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x+1];
		}
	}

	// ragged tail: re-anchor the final chunk at sx - WARP_SIZE (overlapping
	// the previous chunk) and repeat the stage/scan/write sequence.
	if (startx < sx) {
		// carry-in taken from the position that overlaps the new chunk start
		s_marker[threadIdx.y][0] = s_marker[threadIdx.y][sx-startx];
		startx = sx - WARP_SIZE;
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x+1] = marker[start + i*sx];
			s_mask [y * ychunk+i][x+1] = mask[start + i*sx];
		}
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) {
			for (unsigned int i = 1; i <= WARP_SIZE; ++i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i-1], s_old ), s_mask[threadIdx.y][i] );
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x+1];
		}
	}
}

// Backward (right-to-left) X sweep; mirror of the forward kernel.
// Here data lands in shared column [x] and the carry travels through
// column [WARP_SIZE] instead of [0].
template <typename T>
__global__ void iRec1DBackward_X_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy) {

	const unsigned int x = (threadIdx.x + threadIdx.y * XX_THREADS) % WARP_SIZE;
	const unsigned int y = (threadIdx.x + threadIdx.y * XX_THREADS) / WARP_SIZE;
	const unsigned int ychunk = WARP_SIZE / XX_THREADS;
	const unsigned int xstop = sx - WARP_SIZE;

	volatile __shared__ T s_marker[XY_THREADS][WARP_SIZE+1];
	volatile __shared__ T s_mask [XY_THREADS][WARP_SIZE+1];
	T s_old, s_new;
	int startx;          // signed: the loop below decrements past 0
	unsigned int start;

	s_marker[threadIdx.y][0] = 0;  // carry-in for the first (rightmost) chunk

	// walk chunks right-to-left, overlapping by 1 to carry values across
	for (startx = xstop; startx > 0; startx -= WARP_SIZE) {
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		s_marker[threadIdx.y][WARP_SIZE] = s_marker[threadIdx.y][0];

		// stage marker and mask into shared memory
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x] = marker[start + i*sx];
			s_mask [y * ychunk+i][x] = mask[start + i*sx];
		}

		// serial right-to-left scan (same deliberate hazard pattern as forward)
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) {  // requires dimension to be perfectly padded
			for (int i = WARP_SIZE - 1; i >= 0; --i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i+1], s_old ), s_mask[threadIdx.y][i] );
				s_marker[threadIdx.y][i] = s_new;
			}
		}

		// write back
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x];
		}
	}

	// ragged head: re-anchor the final chunk at x = 0
	if (startx <= 0) {
		// -startx is the overlap offset into the previous chunk
		s_marker[threadIdx.y][WARP_SIZE] = s_marker[threadIdx.y][-startx];
		startx = 0;
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x] = marker[start + i*sx];
			s_mask [y * ychunk+i][x] = mask[start + i*sx];
		}
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) {
			for (int i = WARP_SIZE - 1; i >= 0; --i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i+1], s_old ), s_mask[threadIdx.y][i] );
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x];
		}
	}
}

// Top-to-bottom Y sweep, 4-connectivity: one thread per column, serial in y.
// marker[y][x] = min(max(marker[y-1][x], marker[y][x]), mask[y][x]).
template <typename T>
__global__ void iRec1DForward_Y_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy) {
	// parallelize along x
	const int tx = threadIdx.x;
	const int bx = blockIdx.x * MAX_THREADS;
	T s_old, s_new, s_prev;

	if ( (bx + tx) < sx ) {
		s_prev = 0;  // carry from the previous row
		for (int iy = 0; iy < sy; ++iy) {
			s_old = marker[iy * sx + bx + tx];
			s_new = min( max( s_prev, s_old ), mask[iy * sx + bx + tx] );
			s_prev = s_new;
			marker[iy * sx + bx + tx] = s_new;
		}
	}
}

// Bottom-to-top Y sweep, 4-connectivity; mirror of the forward Y kernel.
template <typename T>
__global__ void iRec1DBackward_Y_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const unsigned int sx, const unsigned int sy ) {
	const int tx = threadIdx.x;
	const int bx = blockIdx.x * MAX_THREADS;
	T s_old, s_new, s_prev;

	if ( (bx + tx) < sx ) {
		s_prev = 0;
		for (int iy = sy - 1; iy >= 0; --iy) {
			s_old = marker[iy * sx + bx + tx];
			s_new = min( max( s_prev, s_old ), mask[iy * sx + bx + tx] );
			s_prev = s_new;
			marker[iy * sx + bx + tx] = s_new;
		}
	}
}

// Top-to-bottom Y sweep, 8-connectivity: the carry for column x is the max of
// the previous row's updated values at x-1, x, x+1, staged through shared
// memory with one halo cell on each side.
// NOTE(review): the __syncthreads() calls sit inside `if (bx + tx < sx)` —
// safe only when sx is a multiple of MAX_THREADS so the whole block takes the
// branch together (the comment below says as much); confirm at call sites.
template <typename T>
__global__ void iRec1DForward_Y_dilation_8 ( T* __restrict__ marker, const T* __restrict__ mask, const unsigned int sx, const unsigned int sy) {
	// parallelize along x
	const unsigned int tx = threadIdx.x;
	const unsigned int bx = blockIdx.x * MAX_THREADS;

	// MAX_THREADS row values plus a halo cell at each end; s_marker is the
	// +1-offset view so s_marker[-1] / s_marker[MAX_THREADS] are the halos
	volatile __shared__ T s_marker_B[MAX_THREADS+2];
	volatile T* s_marker = s_marker_B + 1;
	T s_new, s_old, s_prev;

	if ( bx + tx < sx ) {  // make sure number of threads is a divisor of sx
		s_prev = 0;
		for (int iy = 0; iy < sy; ++iy) {
			// thread 0 loads both halo cells (0 at image borders)
			if (tx == 0) {
				s_marker_B[0] = (bx == 0) ? 0 : marker[iy*sx + bx - 1];
				s_marker[MAX_THREADS] = (bx + MAX_THREADS >= sx) ? 0 : marker[iy*sx + bx + MAX_THREADS];
			}
			// the rest of the row is loaded in two strips
			if (tx < WARP_SIZE) {
				s_marker[tx] = marker[iy*sx + bx + tx];
			}
			if (tx < MAX_THREADS - WARP_SIZE) {
				s_marker[tx + WARP_SIZE] = marker[iy*sx + bx + tx + WARP_SIZE];
			}
			__syncthreads();

			// update this row and publish it for the neighbor reads below
			s_old = s_marker[tx];
			s_new = min( max( s_prev, s_old ), mask[iy*sx + bx + tx]);
			s_marker[tx] = s_new;
			marker[iy*sx + bx + tx] = s_new;
			__syncthreads();

			// 8-connected carry: max over the three neighbors above
			s_prev = max( max(s_marker[tx-1], s_marker[tx]), s_marker[tx+1]);
		}
	}
}

// Bottom-to-top Y sweep, 8-connectivity; mirror of iRec1DForward_Y_dilation_8.
template <typename T>
__global__ void iRec1DBackward_Y_dilation_8 ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy) {
	const int tx = threadIdx.x;
	const int bx = blockIdx.x * MAX_THREADS;

	volatile __shared__ T s_marker_B[MAX_THREADS+2];
	volatile T* s_marker = s_marker_B + 1;
	T s_new, s_old, s_prev;

	if ( bx + tx < sx ) {  // make sure number of threads is a divisor of sx
		s_prev = 0;
		for (int iy = sy - 1; iy >= 0; --iy) {
			if (tx == 0) {
				s_marker_B[0] = (bx == 0) ? 0 : marker[iy*sx + bx - 1];
				s_marker[MAX_THREADS] = (bx + MAX_THREADS >= sx) ? 0 : marker[iy*sx + bx + MAX_THREADS];
			}
			if (tx < WARP_SIZE) {
				s_marker[tx] = marker[iy*sx + bx + tx];
			}
			if (tx < MAX_THREADS - WARP_SIZE) {
				s_marker[tx + WARP_SIZE] = marker[iy*sx + bx + tx + WARP_SIZE];
			}
			__syncthreads();

			s_old = s_marker[tx];
			s_new = min( max( s_prev, s_old ), mask[iy*sx + bx + tx]);
			s_marker[tx] = s_new;
			marker[iy*sx + bx + tx] = s_new;
			__syncthreads();

			s_prev = max( max(s_marker[tx-1], s_marker[tx]), s_marker[tx+1]);
		}
	}
}

// Seeds the propagation queue: for a zipped tuple
// (id, center, 4 marker neighbors, 4 mask neighbors), returns the pixel id if
// any neighbor's marker value is below both the center and that neighbor's
// mask (i.e. the neighbor can still rise), else -1.
template<typename T, typename TN>
struct InitialImageToQueue : public thrust::unary_function<TN, int>
{
	__host__ __device__
	int operator()(const TN& pixel) const
	{
		T center = thrust::get<1>(pixel);
		T curr;
		int id = thrust::get<0>(pixel);
		curr = thrust::get<2>(pixel);
		if (curr < center && curr < thrust::get<6>(pixel)) return id;
		curr = thrust::get<3>(pixel);
		if (curr < center && curr < thrust::get<7>(pixel)) return id;
		curr = thrust::get<4>(pixel);
		if (curr < center && curr < thrust::get<8>(pixel)) return id;
		curr = thrust::get<5>(pixel);
		if (curr < center && curr < thrust::get<9>(pixel)) return id;
		return -1;
	}
};

// NOTE: several earlier experimental functor formulations ("ReconPixel"
// variants operating on zip/permutation iterators, some working and some
// not) were kept here as large commented-out blocks; they have been
// condensed to this note. See revision history for the originals.

// Queue-based propagation step for one pixel id.
// The immediate 8-neighborhood is raised without re-queueing (those pixels
// are already reachable this round); the surrounding ring at distance 2 is
// raised AND flagged so it re-enters the queue next iteration.
// NOTE(review): neighbor ids are not bounds-checked — relies on the caller's
// guarantee of a zero border around the image (see "connectivity" comment
// before imreconQueueIntCaller); confirm the padding is at least 2 pixels.
template<typename T>
struct Propagate
{
	volatile T *marker;
	volatile T *mask;
	bool *flag;       // per-pixel "re-queue next round" flags
	const int step;   // row stride in pixels

	__host__ __device__
	Propagate(T* _marker, T* _mask, bool* _flag, int _step) : marker(_marker), mask(_mask), flag(_flag), step(_step) {}

	// raise neighbor nId toward min(center, mask[nId]) without flagging it
	__host__ __device__
	void updateNeighbor(int nId, T center, thrust::minimum<T> mn) {
		T q = marker[nId];
		T p = mask[nId];
		if (q != p && q < center) {
			marker[nId] = mn(center, p);
		}
	}

	// raise neighbor nId and flag it for the next queue round
	__host__ __device__
	void updateAndMarkNeighbor(int nId, T center, thrust::minimum<T> mn) {
		T q = marker[nId];
		T p = mask[nId];
		if (q != p && q < center) {
			marker[nId] = mn(center, p);
			flag[nId] = true;
		}
	}

	__host__ __device__
	void operator()(int id) {
		thrust::minimum<T> mn;
		T center = marker[id];
		int nId;
		// inner ring: the 8-connected neighbors
		nId = id - 1; updateNeighbor(nId, center, mn);
		nId = id + 1; updateNeighbor(nId, center, mn);
		nId = id - step - 1; updateNeighbor(nId, center, mn);
		nId = id - step; updateNeighbor(nId, center, mn);
		nId = id - step + 1; updateNeighbor(nId, center, mn);
		nId = id + step - 1; updateNeighbor(nId, center, mn);
		nId = id + step; updateNeighbor(nId, center, mn);
		nId = id + step + 1; updateNeighbor(nId, center, mn);
		// outer ring at Chebyshev distance 2: update and re-queue
		nId = id - 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id + 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id - 2 * step - 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id - 2 * step - 1; updateAndMarkNeighbor(nId, center, mn);
		nId = id - 2 * step; updateAndMarkNeighbor(nId, center, mn);
		nId = id - 2 * step + 1; updateAndMarkNeighbor(nId, center, mn);
		nId = id - 2 * step + 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id - step - 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id - step + 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id + step - 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id + step + 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id + 2 * step - 2; updateAndMarkNeighbor(nId, center, mn);
		nId = id + 2 * step - 1; updateAndMarkNeighbor(nId, center, mn);
		nId = id + 2 * step; updateAndMarkNeighbor(nId, center, mn);
		nId = id + 2 * step + 1; updateAndMarkNeighbor(nId, center, mn);
		nId = id + 2 * step + 2; updateAndMarkNeighbor(nId, center, mn);
	}
};

// Predicate: true when x > k (used to select valid queue entries, k = -1).
template <typename T>
struct GreaterThanConst : public thrust::unary_function<T,bool>
{
	const T k;
	__host__ __device__
	GreaterThanConst(T _k) : k(_k) {}
	__host__ __device__
	bool operator()(T x) { return x > k; }
};

// Host driver: morphological reconstruction of `marker` under `mask`.
// Phase 1: raster/anti-raster sweeps (the 6 kernels above).
// Phase 2: queue-based propagation with thrust until no pixel changes.
// connectivity is 4 or 8.  The image must have a zero border and be
// contiguous.  Returns the total number of queue entries processed.
template <typename T>
unsigned int imreconQueueIntCaller(T* __restrict__ marker, T* __restrict__ mask, const int sx, const int sy, const int connectivity, cudaStream_t stream) {

	// setup execution parameters
	dim3 threadsx( XX_THREADS, XY_THREADS );
	dim3 blocksx( (sy + threadsx.y - 1) / threadsx.y );
	dim3 threadsy( MAX_THREADS );
	dim3 blocksy( (sx + threadsy.x - 1) / threadsy.x );

	// forward sweep along X
	iRec1DForward_X_dilation <<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy);
	// forward sweep along Y (connectivity selects 4- vs 8-connected kernel)
	if (connectivity == 4) {
		iRec1DForward_Y_dilation <<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy);
	} else {
		iRec1DForward_Y_dilation_8<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy);
	}
	// backward sweep along X
	iRec1DBackward_X_dilation<<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy);
	// backward sweep along Y
	if (connectivity == 4) {
		iRec1DBackward_Y_dilation <<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy);
	} else {
		iRec1DBackward_Y_dilation_8<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy);
	}
	if (stream == 0) cudaDeviceSynchronize();
	else cudaStreamSynchronize(stream);

	// device_ptr views of the mask shifted to each neighbor of the interior
	// origin (sx + 1): p_* are mask neighbors, q_* are marker neighbors.
	thrust::device_ptr<T> p(mask + sx + 1);
	thrust::device_ptr<T> p_ym1(mask + 1);
	thrust::device_ptr<T> p_yp1(mask + 2 * sx + 1);
	thrust::device_ptr<T> p_xm1(mask + sx);
	thrust::device_ptr<T> p_xp1(mask + sx + 2);
	// conn = 8
	thrust::device_ptr<T> p_ym1xm1(mask);
	thrust::device_ptr<T> p_ym1xp1(mask + 2);
	thrust::device_ptr<T> p_yp1xm1(mask + 2 * sx);
	thrust::device_ptr<T> p_yp1xp1(mask + 2 * sx + 2);
	thrust::device_ptr<T> q(marker + sx + 1);
	thrust::device_ptr<T> q_ym1(marker + 1);
	thrust::device_ptr<T> q_yp1(marker + 2 * sx + 1);
	thrust::device_ptr<T> q_xm1(marker + sx);
	thrust::device_ptr<T> q_xp1(marker + sx + 2);
	// conn = 8
	thrust::device_ptr<T> q_ym1xm1(marker);
	thrust::device_ptr<T> q_ym1xp1(marker + 2);
	thrust::device_ptr<T> q_yp1xm1(marker + 2 * sx);
	thrust::device_ptr<T> q_yp1xp1(marker + 2 * sx + 2);

	// NOTE(review): the original comment claims 1-pixel padding per side, but
	// the formula subtracts 4 rows — looks like it assumes a 2-pixel border
	// (consistent with Propagate's distance-2 ring); confirm with callers.
	int area = sx * (sy - 4) - 4;  // actual image area - sx and sy are padded

	typedef typename thrust::device_ptr<T> PixelIterator;
	typedef typename thrust::tuple<signed int, T, T, T, T, T, T, T, T, T> ReconNeighborhood;
	typedef typename thrust::tuple<signed int, T, T, T> ReconNeighborhood2;
	typedef typename thrust::tuple<thrust::counting_iterator<int>, PixelIterator, PixelIterator, PixelIterator, PixelIterator, PixelIterator, PixelIterator, PixelIterator, PixelIterator, PixelIterator> ReconImage;
	typedef typename thrust::zip_iterator<ReconImage> ReconPixelIterator;
	typedef typename thrust::device_vector<int> Queue;
	typedef typename Queue::iterator QueueIterator;
	typedef typename thrust::tuple<int, int, int, int> QueueElement;

	thrust::counting_iterator<int> ids(0);

	// zipped view: (id, center, 4 marker neighbors, 4 mask neighbors)
	ReconImage markermaskNp = thrust::make_tuple(ids, q, q_xp1, q_yp1xm1, q_yp1, q_yp1xp1, p_xp1, p_yp1xm1, p_yp1, p_yp1xp1);
	ReconImage markermaskNpEnd = thrust::make_tuple(ids+area, q+area, q_xp1+area, q_yp1xm1+area, q_yp1+area, q_yp1xp1+area, p_xp1+area, p_yp1xm1+area, p_yp1+area, p_yp1xp1+area);
	ReconPixelIterator image_first = thrust::make_zip_iterator(markermaskNp);
	ReconPixelIterator image_last = thrust::make_zip_iterator(markermaskNpEnd);

	// put the candidates into the queue: mark ...
	int queueSize = area;
	Queue sparseQueue(queueSize, -1);
	thrust::transform(image_first, image_last, sparseQueue.begin(), InitialImageToQueue<T, ReconNeighborhood>());
	// ... then count and compact into a dense queue
	queueSize = thrust::count_if(sparseQueue.begin(), sparseQueue.end(), GreaterThanConst<int>(-1));
	Queue testQueue(area, -1);
	Queue denseQueue(queueSize, 0);
	QueueIterator denseQueue_end = thrust::copy_if(sparseQueue.begin(), sparseQueue.end(), denseQueue.begin(), GreaterThanConst<int>(-1));
	QueueIterator sparseQueue_end;
	thrust::device_vector<bool> dummy(area, false);  // per-pixel re-queue flags
	printf("number of entries in sparseQueue: %d, denseQueue: %lu \n", queueSize, denseQueue_end - denseQueue.begin());

	int iterations = 0;
	int total = 0;
	// iterate: sort the queue by marker value, propagate, rebuild the queue
	// from the flags; hard cap of 10000 rounds as a safety net.
	while (queueSize > 0 && iterations < 10000) {
		++iterations;
		total += queueSize;

		// sort queue ids by their current marker value (highest-value-last
		// processing order via stable sort on the permuted values)
		sparseQueue_end = thrust::copy(denseQueue.begin(), denseQueue.end(), sparseQueue.begin());
		thrust::stable_sort_by_key(thrust::make_permutation_iterator(q, sparseQueue.begin()), thrust::make_permutation_iterator(q, sparseQueue_end), denseQueue.begin());

		thrust::fill(dummy.begin(), dummy.end(), false);
		// propagate from every queued pixel; flags mark pixels to re-queue
		thrust::for_each(denseQueue.begin(), denseQueue.end(), Propagate<T>(thrust::raw_pointer_cast(q), thrust::raw_pointer_cast(p), thrust::raw_pointer_cast(&*dummy.begin()), sx));

		// NOTE: two large commented-out thrust::transform formulations of the
		// 4- and 8-connectivity propagation (via permutation iterators and
		// ReconPixel) were removed here; see revision history.

		// rebuild the dense queue from the flags for the next round
		queueSize = thrust::count_if(dummy.begin(), dummy.end(), thrust::identity<bool>());
		denseQueue.resize(queueSize);
		thrust::fill(denseQueue.begin(), denseQueue.end(), -1);
		denseQueue_end = thrust::copy_if(ids, ids+area, dummy.begin(), denseQueue.begin(), thrust::identity<bool>());
		printf("number of entries in queue: %lu \n", denseQueue_end - denseQueue.begin());
	}

	if (stream == 0) cudaDeviceSynchronize();
	else cudaStreamSynchronize(stream);
	cudaGetLastError();  // NOTE(review): result is discarded — errors are silently cleared

	printf("iterations: %d, total: %d\n", iterations, total);
	return total;
}

// explicit instantiation for 8-bit images
template unsigned int imreconQueueIntCaller<unsigned char>(unsigned char*, unsigned char*, const int, const int, const int, cudaStream_t );

}}
11,915
#include <stdio.h>
#include <stdlib.h>
#include <vector_types.h>
#include <stdint.h>
#include <assert.h>

// Upper-bound binary search: returns the index of the first element of
// `values` that is strictly greater than `input` (== `len` when no such
// element exists). `values` must be sorted ascending.
__device__ __host__ inline uint32_t binarySearch(uint64_t *values, uint64_t input, uint32_t len)
{
    int32_t lo = 0;
    int32_t hi = (int32_t)len - 1;
    while (lo <= hi) {
        // overflow-safe midpoint
        uint32_t mid = lo + (hi - lo) / 2;
        if (values[mid] <= input)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return (uint32_t)lo;
}

int main()
{
    uint64_t data[] = {1, 2, 4, 6, 7, 9};

    // Smoke loop kept from the original: exercise the search once per
    // element (the result is intentionally unused).
    for (int i = 0; i < 6; i++) {
        uint32_t result = binarySearch(data, data[i], 6);
        (void)result;
        //printf("%u\n",result);
    }

    // Probe below, at, between, and above the stored keys.
    printf("%u\n", binarySearch(data, 0, 6));
    printf("%u\n", binarySearch(data, 1, 6));
    printf("%u\n", binarySearch(data, 2, 6));
    printf("%u\n", binarySearch(data, 3, 6));
    printf("%u\n", binarySearch(data, 4, 6));
    printf("%u\n", binarySearch(data, 9, 6));
    printf("%u\n", binarySearch(data, 10, 6));
    return 0;
}
11,916
#include <assert.h>
#include <stdio.h>
#include <algorithm>
#include <stdlib.h>
#include <iostream>
#include "cuda.h"

#define NUM (256*1024*1024)
#define THREADS_PER_BLOCK_X 384
#define THREADS_PER_BLOCK_Y 1
#define THREADS_PER_BLOCK_Z 1
#define PROTECT_BITS (0xFFFF0000)

// Scatter-write micro-benchmark: each thread writes its global id `x` to an
// address formed by keeping the high bits (protectBits) and masking the low
// bits (shrinkBits), so all writes land inside a window of shrinkBits+1 ints.
__global__ void test_kernel(
    int* __restrict__ buf,
    int protectBits,
    int shrinkBits)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int address = (x & protectBits) | (x & shrinkBits);
    buf[address] = x;
    //printf("address[%d] tid:%d \n ",address,x);
}

using namespace std;

int main()
{
    int* hostA;
    int* deviceA;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float eventMs = 1.0f;

    // BUG FIX: the original malloc'd hostA and copied its indeterminate
    // contents to the device; calloc gives a defined (zeroed) source.
    hostA = (int*)calloc(NUM, sizeof(int));
    cudaMalloc((void**)&deviceA, NUM * sizeof(int));
    cudaMemcpy(deviceA, hostA, NUM * sizeof(int), cudaMemcpyHostToDevice);

    // Warm-up launch (single thread) so the first timed run is not skewed
    // by one-time initialization cost.
    test_kernel<<<dim3(1,1,1), dim3(1,1,1), 0, 0>>>(deviceA, 0x0, 0x0);

    // Sweep the write-window size from 16 ints up to 64K ints, doubling.
    for (int i = 16; i < 64 * 1024; i = i << 1)
    {
        cudaEventRecord(start, 0);
        test_kernel<<<dim3(NUM/THREADS_PER_BLOCK_X, 1, 1),
                      dim3(THREADS_PER_BLOCK_X, 1, 1), 0, 0>>>(deviceA, PROTECT_BITS, i - 1);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&eventMs, start, stop);
        printf("elapsed time:%f\n", eventMs);

        // Truncated to int for display, matching the original output format.
        int bandwidth = (double)NUM * sizeof(int) / 1024 / 1024 / 1024 / (eventMs / 1000);
        // BUG FIX: `i * sizeof(int)` is a size_t; the original printed it
        // with %ld, which is undefined behaviour on LLP64 platforms — use %zu.
        printf("Shrink Size in Bytes[%zu], bandwidth %d (GB/S)\n",
               (size_t)i * sizeof(int), bandwidth);
    }

    // BUG FIX: the original leaked both CUDA events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(deviceA);
    free(hostA);
    return 0;
}
11,917
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"

// to compile on pascal node:
// nvcc -arch=sm_60 -o cudaFillVector cudaFillVector.cu

// Fill device array c_a (length N) with `val`, one element per thread.
__global__ void fillKernel(int N, int val, int *c_a){

  // linear global index: thread offset within block + block offset
  int n = threadIdx.x + blockIdx.x * blockDim.x;

  // the last block may overhang the array — guard the store
  if (n < N)
    c_a[n] = val;
}

int main(int argc, char **argv){

  // 1. allocate HOST array (zero-initialized)
  int N = 1024;
  int *h_a = (int*) calloc(N, sizeof(int));

  // 2. allocate DEVICE array
  int *c_a;
  cudaMalloc(&c_a, N*sizeof(int));

  // 3. launch DEVICE fill kernel
  int T = 256;              // threads per thread-block
  int val = 999;            // value to fill the DEVICE array with
  dim3 G( (N+T-1)/T );      // ceil-div: enough blocks to cover all N entries
  dim3 B(T);
  fillKernel <<< G,B >>> (N, val, c_a);

  // 4. copy data from DEVICE array back to HOST array
  cudaMemcpy(h_a, c_a, N*sizeof(int), cudaMemcpyDeviceToHost);

  // 5. print out values on HOST
  for(int n=0;n<N;++n)
    printf("h_a[%d] = %d\n", n, h_a[n]);

  // 6. free both arrays
  cudaFree(c_a);
  free(h_a);
  return 0;
}
11,918
#include <cmath>
#include <cufft.h>

// Computes the forward real-to-complex FFT of a host-resident signal.
//   Signal_h:    host input, `Size` real samples.
//   SignalFFT_h: host output, Size/2+1 complex bins (the non-redundant half
//                spectrum produced by CUFFT_R2C).
// NOTE(review): none of the cudaMalloc/cudaMemcpy/cufft* calls are checked
// for errors, and the plan is created and destroyed on every invocation —
// acceptable for one-shot use, wasteful if called in a loop.
void cufft_core_execute(float* Signal_h, int Size, float2* SignalFFT_h) {
    float *Signal_d;
    float2 *SignalFFT_d;
    // Device buffers: Size reals in, Size/2+1 complex values out.
    cudaMalloc((void**)&Signal_d, Size*sizeof(float));
    cudaMalloc((void**)&SignalFFT_d, (Size/2+1)*sizeof(float2));
    cudaMemcpy(Signal_d, Signal_h, Size*sizeof(float), cudaMemcpyHostToDevice);
    // One-shot 1-D R2C plan, batch size 1.
    cufftHandle Plan;
    cufftPlan1d(&Plan, Size, CUFFT_R2C, 1);
    cufftExecR2C(Plan, (cufftReal*)Signal_d, (cufftComplex*)SignalFFT_d);
    // Blocking copy back — also synchronizes with the FFT execution.
    cudaMemcpy(SignalFFT_h, SignalFFT_d, (Size/2+1)*sizeof(float2), cudaMemcpyDeviceToHost);
    cudaFree(Signal_d);
    cudaFree(SignalFFT_d);
    cufftDestroy(Plan);
}
11,919
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>

// One thread per candidate value: tid is printed when the sum of its decimal
// digits times the product of its decimal digits equals tid itself
// ("sum-product number"). Threads at or beyond `limit` exit immediately.
__global__ void kernel(uint32_t limit)
{
    uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= limit) return;

    uint32_t n = tid;
    uint32_t sum = 0;
    uint32_t prod = 1;
    while (n != 0) {
        uint32_t digit = n % 10;
        n /= 10;
        sum += digit;
        prod *= digit;
    }
    if (sum * prod == tid)
        printf("%u\n", tid);
}

// Check all values in [0, range) for the sum-product property.
//
// BUG FIX: the original launched sqrt(range) blocks of ceil(range/sqrt(range))
// threads. For range = 16777216 that is 4096 threads per block, which exceeds
// the 1024-threads-per-block hardware limit, so the launch failed silently
// (no error check was performed). Use a fixed block size, round the grid up,
// and pass `range` to the kernel so the tail threads are masked off.
void checkrange(uint32_t range)
{
    printf("Checking %u for sum-product numbers\n", range);
    const uint32_t threads = 256;
    const uint32_t blocks = (range + threads - 1) / threads;
    kernel<<<blocks, threads>>>(range);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
}

int main()
{
    // main iteration
    checkrange(1024);
    checkrange(16777216);
    return 0;
}
11,920
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Auto-generated floating-point stress kernel: repeatedly folds chains of
// transcendental expressions into `comp` and prints the final value with
// full precision. Launched <<<1,1>>> with values parsed from argv.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25) {
for (int i=0; i < var_1; ++i) {
  comp += var_3 + powf((var_4 * var_5), (-1.9858E-44f - acosf((+1.0257E-26f - powf((var_6 + asinf(fabsf((var_7 * ldexpf(+1.8798E35f, 2))))), +1.5273E34f * asinf(-1.8499E-37f * (-1.2484E-36f + (var_8 + var_9 * (+1.2591E-41f + var_10)))))))));
  comp = +0.0f * (-1.6673E-35f - (var_11 + (-1.3009E-35f - (+1.1342E35f - +1.4982E-35f))));
  comp += -1.3040E34f + +0.0f / var_12 - (-1.9604E21f + coshf(-1.3701E-42f / (-1.0553E36f / -0.0f + fmodf((+1.1148E34f + +1.2900E-27f * var_13), sinhf(+1.4706E35f * (+1.6873E-36f * (-1.2804E-43f - (+1.5872E-41f + -1.7754E34f))))))));
if (comp >= (var_14 + var_15)) {
  float tmp_1 = -0.0f;
  float tmp_2 = -1.5222E-35f;
  float tmp_3 = +1.9014E36f;
  comp = tmp_3 - tmp_2 + tmp_1 * atanf(log10f((var_16 * -1.5509E-21f * var_17 / var_18)));
}
if (comp > +1.7469E-44f + (-1.6770E-44f + acosf(var_19 + +1.1268E36f))) {
  float tmp_4 = (-1.6707E-42f - (var_20 - atanf(var_21 - -1.9621E13f / var_22 - -0.0f)));
  float tmp_5 = +1.1750E34f;
  float tmp_6 = +1.5153E-43f;
  comp = tmp_6 / tmp_5 + tmp_4 / +1.7235E12f - var_23 + -1.3209E-42f;
}
for (int i=0; i < var_2; ++i) {
  comp += (+1.7389E-41f + var_24 + var_25 * -1.1585E35f);
}
}
   printf("%.17g\n", comp);
}

// Helper emitted by the generator; unused in this particular test case.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// NOTE(review): argv is indexed up to argv[26] without an argc check — the
// generated harness is expected to always supply all 26 arguments.
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26);
  cudaDeviceSynchronize();

  return 0;
}
11,921
#include "includes.h"

// Tait-style equation of state: p = stiff * ((rho/rho0)^7 - 1), with the
// result clamped to be non-negative. One thread per particle.
__global__ void computePressure_CUDA(float* pressure, float* density, const int num, const float rho0, const float stiff)
{
    const unsigned int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
    if (idx >= num)
        return;

    // Accumulate into a register and store once, instead of the original
    // write-then-conditionally-overwrite of global memory.
    float p = stiff * (powf(density[idx] / rho0, 7) - 1.0f);
    if (p < 0.0f)
        p = 0.0f;  // clamp: negative pressure is suppressed
    pressure[idx] = p;
}
11,922
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Auto-generated floating-point stress kernel: conditionally folds chains of
// transcendental expressions into `comp` and prints the final value with
// full precision. Launched <<<1,1>>> with values parsed from argv.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13) {
if (comp >= +1.3822E-42f - var_1 / var_2 * var_3 / var_4) {
  comp += (var_6 * -0.0f / var_7 / sinhf(+0.0f - var_8 * (+0.0f / +1.2899E-44f)));
  float tmp_1 = +1.7682E-35f;
  float tmp_2 = -1.5314E-36f;
  comp += tmp_2 * tmp_1 * -1.3259E-10f * (+1.4769E35f - -1.1545E-43f);
for (int i=0; i < var_5; ++i) {
  float tmp_3 = +1.1548E13f;
  comp = tmp_3 - expf((var_9 - (var_10 - var_11 * var_12)));
  comp += -1.4539E36f - -1.0891E-43f - (-1.5014E34f / (+1.2168E35f / (var_13 + +1.7839E-35f)));
}
}
   printf("%.17g\n", comp);
}

// Helper emitted by the generator; unused in this particular test case.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// NOTE(review): argv is indexed up to argv[14] without an argc check — the
// generated harness is expected to always supply all 14 arguments.
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  int tmp_6 = atoi(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14);
  cudaDeviceSynchronize();

  return 0;
}
11,923
// Reads a cell at (x+dx, y+dy), wrapping around the torus edges.
// BUG FIX: the original wrapped with `(unsigned int)(x + dx) % domain_x`.
// Casting a negative coordinate to unsigned yields a huge value, and
// `huge % domain` is only the correct wrapped index when the domain size is
// a power of two (e.g. for domain_x = 10, x = 0, dx = -1 it returns 5, not
// 9). Adding the domain size before the modulo is correct for any size,
// given dx/dy in {-1, 0, 1} and 0 <= x < domain_x.
__device__ int read_cell(int * source_domain, int x, int y, int dx, int dy,
                         unsigned int domain_x, unsigned int domain_y)
{
    x = (x + dx + (int)domain_x) % (int)domain_x; // Wrap around
    y = (y + dy + (int)domain_y) % (int)domain_y;
    return source_domain[y * domain_x + x];
}

// Compute kernel: one thread per cell of a two-colour Game of Life
// (0 = empty, 1 = red, 2 = blue). Assumes the launch configuration covers
// the domain exactly (no bounds guard) — TODO confirm against the host code.
__global__ void life_kernel(int * source_domain, int * dest_domain,
                            int domain_x, int domain_y)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y;

    // Read own cell
    int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y);

    // Read the 8 neighbors and count the number of blue and red cells.
    // For a live cell, stop early once more than 3 live neighbours are
    // seen: it dies from overcrowding either way.
    int blue = 0, red = 0, alive = 0;
    for (int x_offset = -1; x_offset < 2 && (!myself || (alive < 4)); x_offset++)
    {
        for (int y_offset = -1; y_offset < 2 && (!myself || (alive < 4)); y_offset++)
        {
            // ignore self
            if (x_offset == 0 && y_offset == 0)
                continue;
            switch (read_cell(source_domain, tx, ty, x_offset, y_offset,
                              domain_x, domain_y))
            {
            case 1: red++;  alive++; break;
            case 2: blue++; alive++; break;
            default: break;
            }
        }
    }

    // Compute the new value.
    if (!myself)
    {
        // Empty cell: born with exactly 3 live neighbours, taking the
        // majority colour among them.
        if (alive == 3)
        {
            if (blue < red)
                myself = 1;
            else
                myself = 2;
        }
    }
    else
    {
        // Live cell: dies unless it has exactly 2 or 3 live neighbours.
        if (alive != 2 && alive != 3)
            myself = 0;
        // else survive unchanged
    }

    // Write the result.
    dest_domain[ty * domain_x + tx] = myself;
}
11,924
/**
 * Author: 易培淮 (Yi Peihuai)
 * Mail: yiph@ihep.ac.cn
 * Function: Accelerate simulation with GPU
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <string.h>

__device__ double generateRandom(curandState *state);
__device__ void generateRandomInit(curandState *state);

// Error-handling macro for CUDA runtime calls: print and abort on failure.
#define CHECK(call) \
{\
 const cudaError_t error = call;\
 if (error != cudaSuccess)\
 {\
 printf("Error:%s:%d, ", __FILE__, __LINE__);\
 printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));\
 exit(1);\
 }\
}

// Error-handling macro intended for cuRAND host-API calls.
// NOTE(review): this compares a cudaError_t against CURAND_STATUS_SUCCESS
// and formats it with cudaGetErrorString — the result type should be
// curandStatus_t and the message mapping is wrong. Unused in this file;
// fix before use.
#define CHECK_CURAND(call) \
{\
 const cudaError_t error = call;\
 if (error != CURAND_STATUS_SUCCESS)\
 {\
 printf("Error:%s:%d, ", __FILE__, __LINE__);\
 printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));\
 exit(2);\
 }\
}

// Kernel: __global__ functions are compiled into GPU code, are launched by
// the CPU, and are globally visible to the CPU.
// Inverse-CDF sampling: one thread per PMT. First draws the number of hits
// n from the cumulative distribution in pmt[id*10 .. id*10+9], then draws n
// hit times from the cumulative distribution in hittime[id*10 .. id*10+9],
// storing each sampled time index into result[id*10+item].
__global__ void CDF_Sampling(double *pmt, double *hittime, double *result, int numElements)
{
    int id = threadIdx.x;
    curandState state;
    generateRandomInit(&state);
    if (id < numElements)
    {
        double prob;
        prob = generateRandom(&state);
        double sum = 0;
        int n = 0;
        // Walk the CDF until the uniform draw falls inside a bin: that bin
        // index is the sampled hit count.
        for (int item = 0; item < 10;item++)
        {
            sum += pmt[id*10+item];
            if (prob <= sum)
            {
                n = item;
                printf("thread %d: hit times:%d\n", id, n);
                break;
            }
        }
        // Draw one time index per hit, same inverse-CDF walk.
        for (int item = 0;item < n;item++)
        {
            double prob2;
            prob2 = generateRandom(&state);
            double sum = 0;
            for (int j = 0; j < 10;j++)
            {
                sum += hittime[id*10+j];
                if (prob2 <= sum)
                {
                    result[id*10+item] = (double)j;
                    printf("thread %d: %dth hit time %d\n", id, item+1,j);
                    break;
                }
            }
        }
    }
}

// Device function: __device__ functions run on the GPU and are not visible
// to the CPU. Returns a uniform double in (0, 1].
// NOTE(review): curand_uniform_double already returns a positive value, so
// the abs() is redundant — confirm intent.
__device__ double generateRandom(curandState *state)
{
    int id = threadIdx.x;
    double result = abs(curand_uniform_double(state));
    printf("thread:%d random double: %f \n",id,result);
    return result;
}

// Seed one curand state per thread from the device clock.
__device__ void generateRandomInit(curandState *state)
{
    int id = threadIdx.x;
    long seed = (unsigned long long)clock();
    curand_init(seed, id, 0, state);
}

/**
 * Host main routine
 */
int
main(void)
{
    // Generate fake input data: flat CDF bins of 0.1 for each PMT.
    int total_num = 100;
    int max_n = 10;
    int max_time = 10;
    size_t nBytes = total_num * max_n * sizeof(double);
    double *pmt;
    pmt = (double*)malloc(nBytes);
    for (int i = 0;i < total_num;i++)
    {
        for (int j = 0;j < max_n;j++)
        {
            pmt[i*max_n +j] = 0.1;
        }
    }
    double *hittime;
    hittime = (double*)malloc(nBytes);
    for (int i = 0;i < total_num;i++)
    {
        for (int j = 0;j < max_time;j++)
        {
            hittime[i*max_time+j] = 0.1;
        }
    }
    double *h_res = (double*)malloc(nBytes);

    // GPU timing: create start/stop events (overall and kernel-only).
    cudaEvent_t start, stop, gpu_start,gpu_stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(start);

    // Allocate GPU memory.
    double *d_pmt, *d_hit,*d_result;
    CHECK(cudaMalloc((double**)&d_pmt,nBytes));
    CHECK(cudaMalloc((double**)&d_hit, nBytes));
    CHECK(cudaMalloc((double**)&d_result, nBytes));

    // Copy host memory to the GPU.
    CHECK(cudaMemcpy(d_pmt, pmt, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_hit, hittime, nBytes, cudaMemcpyHostToDevice));

    // Select GPU 0.
    cudaSetDevice(0);

    // Threads per block.
    dim3 block(total_num);//threadsPerBlock
    // Blocks per grid.
    dim3 grid(total_num / block.x);//blocksPerGrid

    cudaEventRecord(gpu_start);
    // Launch the kernel.
    CDF_Sampling <<<grid, block >>>(d_pmt, d_hit, d_result, total_num);
    cudaEventRecord(gpu_stop);
    cudaEventSynchronize(gpu_stop);// Synchronize: block the CPU until the event has been recorded.

    // Copy the results back from the GPU to the CPU.
    CHECK(cudaMemcpy(h_res, d_result, nBytes, cudaMemcpyDeviceToHost));
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float time;
    // Compute elapsed times (resolution about 0.5 us).
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("total use time %f ms\n", time);
    cudaEventElapsedTime(&time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    printf("gpu use time %f ms\n", time);

    for (int i = 0;i < total_num;i++)
    {
        for (int j = 0;j < max_time;j++)
        {
            printf("%f ",h_res[i*max_time+j]);
        }
        printf("\n");
    }

    // Free GPU memory.
    CHECK(cudaFree(d_pmt));
    CHECK(cudaFree(d_hit));
    CHECK(cudaFree(d_result));
    free(pmt);
    free(hittime);
    free(h_res);

    // Release all GPU resources held by this process.
    cudaDeviceReset();
    return 0;
}
11,925
/*
 KAM PUI SO (ANTHONY)
 CS 510 GPU
 Homework 2

 The Game of Life
 Rules:
 Any live cell with fewer than two live neighbours dies, as if caused by under-population.
 Any live cell with two or three live neighbours lives on to the next generation.
 Any live cell with more than three live neighbours dies, as if by overcrowding.
 Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
*/

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WIDTH 70
#define HEIGHT 47
#define MAX 2000
#define LIMIT 512
#define NEIGHBORS 8
#define ROW 0
#define COL 1

// Neighbour offset table (host copy and device __constant__ copy).
const int offsets[NEIGHBORS][2] = {{-1, 1},{0, 1},{1, 1}, {-1, 0}, {1, 0}, {-1,-1},{0,-1},{1,-1}};
__constant__ int offsets_dev[NEIGHBORS][2] = {{-1, 1},{0, 1},{1, 1}, {-1, 0}, {1, 0}, {-1,-1},{0,-1},{1,-1}};

/* The kernel that will execute on the GPU: one thread per cell. */
__global__ void step_kernel(int *board, int *result, int width, int height) {
    int n = width * height;
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // BUG FIX: the original evaluated the neighbour loop and then wrote
    // result[idx] = 0 in its else-branch even when idx >= n — an
    // out-of-bounds store for the surplus threads of the last block.
    // Guard before touching anything.
    if (idx >= n)
        return;

    int num_neighbors = 0;
    int nx = 0;
    int ny = 0;
    int x = idx % width;
    int y = idx / width;
    int i = 0;

    for (i = 0; i < NEIGHBORS; i++) {
        // To make the board torroidal, we use modular arithmetic to
        // wrap neighbor coordinates around to the other side of the
        // board if they fall off.
        nx = (x + offsets_dev[i][ROW] + width) % width;
        ny = (y + offsets_dev[i][COL] + height) % height;
        if (board[ny * width + nx]) {
            num_neighbors++;
        }
    }

    // apply the Game of Life rules to this cell
    if ((board[idx] && num_neighbors == 2) || num_neighbors == 3)
        result[idx] = 1;
    else
        result[idx] = 0;
}

/* This function encapsulates the process of creating and tearing down the
 * environment used to execute our game of life iteration kernel. The steps of the
 * process are:
 *   1. Allocate memory on the device to hold our board vectors
 *   2. Copy the board vectors to device memory
 *   3. Execute the kernel
 *   4. Retrieve the result board vector from the device by copying it to the host
 *   5. Free memory on the device
 */
void step_dev(int *board, int *result, int width, int height) {
    // Step 1: Allocate memory
    int *board_dev, *result_dev;
    int n = width * height;

    // Since cudaMalloc does not return a pointer like C's traditional malloc
    // (it returns a success status instead), we provide as it's first argument
    // the address of our device pointer variable so that it can change the
    // value of our pointer to the correct device address.
    cudaMalloc((void **) &board_dev, sizeof(int) * n);
    cudaMalloc((void **) &result_dev, sizeof(int) * n);

    // Step 2: Copy the input vectors to the device
    cudaMemcpy(board_dev, board, sizeof(int) * n, cudaMemcpyHostToDevice);

    // Step 3: Invoke the kernel
    // We allocate enough blocks (each 512 threads long) in the grid to
    // accomodate all `n` elements in the vectors. The 512 long block size
    // is somewhat arbitrary, but with the constraint that we know the
    // hardware will support blocks of that size.
    dim3 dimGrid((n + LIMIT - 1) / LIMIT, 1, 1);
    dim3 dimBlock(LIMIT, 1, 1);
    step_kernel<<<dimGrid, dimBlock>>>(board_dev, result_dev, width, height);

    // Step 4: Retrieve the results
    cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost);

    // Step 5: Free device memory
    cudaFree(board_dev);
    cudaFree(result_dev);
}

/* The old-fashioned CPU-only way to step thru game of life */
void step(int *current, int *next, int width, int height) {
    // coordinates of the cell we're currently evaluating
    int x, y;
    // offset index, neighbor coordinates, alive neighbor count
    int i, nx, ny, num_neighbors;

    // write the next board state
    for (y=0; y<height; y++) {
        for (x=0; x<width; x++) {
            // count this cell's alive neighbors
            num_neighbors = 0;
            for (i = 0; i < NEIGHBORS; i++) {
                // To make the board torroidal, we use modular arithmetic to
                // wrap neighbor coordinates around to the other side of the
                // board if they fall off.
                nx = (x + offsets[i][ROW] + width) % width;
                ny = (y + offsets[i][COL] + height) % height;
                if (current[ny * width + nx]) {
                    num_neighbors++;
                }
            }

            // apply the Game of Life rules to this cell
            next[y * width + x] = 0;
            if ((current[y * width + x] && num_neighbors==2) || num_neighbors==3) {
                next[y * width + x] = 1;
            }
        }
    }
}

// fill the board with random cells
void fill_board(int *board, int width, int height) {
    int i;
    for (i = 0; i < (width * height); i++)
        board[i] = rand() % 2;
}

// print board image
void print_board(int *board, int width, int height) {
    int x, y;
    for (y = 0; y<height; y++) {
        for (x = 0; x<width; x++) {
            char c = board[y * width + x] ? '#':' ';
            printf("%c", c);
        }
        printf("\n");
    }
    printf("-----\n");
}

// animate each cell for MAX generations
void animate(int *current, int *next, int width, int height) {
    struct timespec delay = {0, 0}; // 0.005 seconds
    // struct timespec delay = {0, 125000000}; // 0.125 seconds
    // struct timespec delay = {0, 250000000}; // 0.25 seconds
    struct timespec remaining;
    // while (1) {
    for (int i = 0; i < MAX; ++i) {
        printf("%d\n", i);
        print_board(current, width, height);
        step_dev(current, next, width, height);

        // Copy the next state, that step() just wrote into, to current state
        memcpy(current, next, sizeof(int) * width * height);

        // We sleep only because textual output is slow and the console needs
        // time to catch up. We don't sleep in the graphical X11 version.
        nanosleep(&delay, &remaining);
    }
}

// main function
int main(void) {
    // variable
    int width = WIDTH;
    int height = HEIGHT;
    int n = width * height;
    int *current = (int *) malloc(n* sizeof(int));
    int *next = (int *) malloc(n * sizeof(int));

    // initialize the global "current"
    fill_board(current, width, height);
    animate(current, next, width, height);

    // free memory
    free(current);
    free(next);
    return 0;
}
11,926
#include "includes.h"

// Row-direction central-difference gradient of a 3-D image, computed through
// a shared-memory tile with halo columns (separable-filter pattern).
// Tile geometry comes from the ROWS_GRAD_* constants in includes.h; each
// thread produces ROWS_GRAD_RESULT_STEPS output pixels along x.
// NOTE(review): main-data loads are not bounds-checked, so imageW appears to
// be assumed a multiple of ROWS_GRAD_RESULT_STEPS * ROWS_GRAD_BLOCKDIM_X
// (and imageH/imageD multiples of the y/z block dims) — confirm with the
// host-side launch code.
__global__ void gradientRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD )
{
    __shared__ float s_Data[ROWS_GRAD_BLOCKDIM_Z][ROWS_GRAD_BLOCKDIM_Y][(ROWS_GRAD_RESULT_STEPS + 2 * ROWS_GRAD_HALO_STEPS) * ROWS_GRAD_BLOCKDIM_X];

    //Offset to the left halo edge
    const int baseX = (blockIdx.x * ROWS_GRAD_RESULT_STEPS - ROWS_GRAD_HALO_STEPS) * ROWS_GRAD_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ROWS_GRAD_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * ROWS_GRAD_BLOCKDIM_Z + threadIdx.z;

    // Rebase the pointers so index 0 is this thread's leftmost halo column.
    d_Src += (baseZ * imageH + baseY) * imageW + baseX;
    d_Dst += (baseZ * imageH + baseY) * imageW + baseX;

    //Load main data
#pragma unroll
    for (int i = ROWS_GRAD_HALO_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = d_Src[i * ROWS_GRAD_BLOCKDIM_X];
    }

    //Load left halo (zero-fill outside the left image border)
#pragma unroll
    for (int i = 0; i < ROWS_GRAD_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = (baseX + i * ROWS_GRAD_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_GRAD_BLOCKDIM_X] : 0;
    }

    //Load right halo (zero-fill outside the right image border)
#pragma unroll
    for (int i = ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS + ROWS_GRAD_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X] = (baseX + i * ROWS_GRAD_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_GRAD_BLOCKDIM_X] : 0;
    }

    //Compute and store results: central difference (right - left) / 2.
    __syncthreads();
#pragma unroll
    for (int i = ROWS_GRAD_HALO_STEPS; i < ROWS_GRAD_HALO_STEPS + ROWS_GRAD_RESULT_STEPS; i++)
    {
        float sum = 0;
        sum += s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X + 1];
        sum -= s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_GRAD_BLOCKDIM_X - 1];
        sum *= 0.5f;
        d_Dst[i * ROWS_GRAD_BLOCKDIM_X] = sum;
    }
}
11,927
#include "includes.h"

// Per-element selection between three candidate arrays, driven by the
// selector code kex[i] (0, 1 or 2). The selection is branch-free: each
// equality test contributes a 0.0f/1.0f multiplicative mask, so any other
// selector value yields 0 — exactly as in the original formulation.
__global__ void chooseLaw ( const int nwl, const int *kex, const float *didi11, const float *didi12, const float *didi13, float *didi1 )
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if ( idx >= nwl )
        return;

    const float pick0 = ( kex[idx] == 0 );
    const float pick1 = ( kex[idx] == 1 );
    const float pick2 = ( kex[idx] == 2 );
    didi1[idx] = pick0 * didi11[idx] + pick1 * didi12[idx] + pick2 * didi13[idx];
}
11,928
#include "thread-pool-runner.hh"
#include "runtime-infos.hh"
#include "kernels.hh"
#include "../runtime/node.hh"
#include "../runtime/nodes-list.hh"

#include <chrono>
#include <iostream>

namespace cpu
{

    namespace
    {

        // True when every predecessor of task `pos` has finished executing.
        bool task_ready(std::size_t pos, RuntimeInfos* infos)
        {
            for (auto n : infos->tasks_->preds()[pos])
                if (!infos->tasks_status_[n])
                    return false;
            return true;
        }

        // Dispatch a node to its kernel implementation; SIMD variants live
        // at a fixed offset in the kernel table.
        void exec_kernel(rt::Node* node)
        {
            std::size_t id = node->type;
            if (node->use_simd)
                id += KERNEL_SIMD_OFFSET;
            kernels_list[id](node);
        }

        // Worker loop over the task graph: claim the next task index,
        // spin-wait until its predecessors are done, run it, mark it done.
        // NOTE(review): `next_task_++` and the reads/writes of
        // `tasks_status_` happen concurrently from several worker threads;
        // this is only safe if those members are atomic — confirm their
        // declarations in RuntimeInfos.
        void exec_graph(RuntimeInfos* infos)
        {
            while (infos->exec_graph)
            {
                std::size_t task = infos->next_task_++;
                if (task >= infos->tasks_->size())
                    break;
                rt::Node* node = infos->tasks_->nodes()[task];

                //wait for predecessors to finish
                while (!task_ready(task, infos))
                    std::this_thread::sleep_for(std::chrono::milliseconds(1));

                exec_kernel(node);
                infos->tasks_status_[task] = 1;
            }

            //no more operands to be executed but some didn't finish executing operations
            infos->exec_graph = false;

            //set flag if graph terminated
            for (auto s : infos->tasks_status_)
                if (s == 0)
                    return;
            infos->graph_completed = true;
        }

        // Per-thread entry point: idle until a graph execution is requested,
        // then participate in it; exit when `quit` is raised.
        void thread_runner(RuntimeInfos* infos)
        {
            //std::cout << "begin\n";
            while (!infos->quit)
            {
                if (infos->exec_graph)
                    exec_graph(infos);
                else
                    std::this_thread::sleep_for(std::chrono::milliseconds(10));
            }
            //std::cout << "end\n";
        }

    }

    // Spawn `nthreads` persistent workers sharing one RuntimeInfos.
    ThreadPoolRunner::ThreadPoolRunner(std::size_t nthreads)
    {
        infos_ = new RuntimeInfos;
        infos_->quit = false;
        infos_->exec_graph = false;

        for (std::size_t i = 0; i < nthreads; ++i)
            ths_.emplace_back(&thread_runner, infos_);
    }

    // Signal shutdown and join all workers before releasing shared state.
    ThreadPoolRunner::~ThreadPoolRunner()
    {
        infos_->quit = true;
        for (auto& t : ths_)
            t.join();
        delete infos_;
    }

    // Execute one task graph to completion. Resets the shared bookkeeping,
    // raises exec_graph to wake the workers, then polls (100 ms granularity)
    // until every task has been marked done.
    void ThreadPoolRunner::run(rt::NodesList& tasks)
    {
        infos_->tasks_ = &tasks;
        infos_->next_task_ = 0;
        infos_->graph_completed = false;
        infos_->tasks_status_ = std::vector<int>(tasks.size(), 0);
        infos_->exec_graph = true;

        while (!infos_->graph_completed)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
    }

}
11,929
#include "includes.h"
#define CUDA_CHECK_ERROR
#define CudaSafeCall(err) __CudaSafeCall(err, __FILE__, __LINE__)
#define CudaCheckError() __CudaCheckError(__FILE__, __LINE__)

// 2x2, stride-2 max pooling over a channel-interleaved (HWC) tensor.
// Each thread produces one output pixel across all `channels`.
// The 2x2 window maxes input pixels {index, index+1, index+width, index+width+1}.
// NOTE(review): there is no bounds guard on thread_id — the launcher must
// start exactly (width/2) * (height/2) threads, and `width` must be even;
// confirm against the host code.
// Note: `max` starts at 0, so strictly negative inputs pool to 0 — i.e. the
// pooling has an implicit ReLU baked in (fine for post-ReLU activations).
__global__ void maxpooling(float *output, const float *input, const int width, const int channels)
{
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    // Map the flat output index to the top-left corner of the 2x2 window.
    int new_width = width / 2;
    int i = thread_id / new_width * 2;
    int j = thread_id % new_width * 2;
    int index = i * width + j;
    for (int c = 0; c < channels; c++)
    {
        float max = 0;
        if (max < input[index * channels + c])
            max = input[index * channels + c];
        if (max < input[(index + 1) * channels + c])
            max = input[(index + 1) * channels + c];
        if (max < input[(index + width) * channels + c])
            max = input[(index + width) * channels + c];
        if (max < input[(index + width + 1) * channels + c])
            max = input[(index + width + 1) * channels + c];
        output[thread_id * channels + c] = max;
    }
}
11,930
/*----------
 * Streams - simple multi-stream example
 * GPU Pro Tip: CUDA 7 Streams simplify concurrency
 * NVIDIA Developer Blog
 * Autor: Mark Harris
 * ----------
 * Universidad del Valle
 * Programación de Microprocesadores
 * Mod.: K.Barrera, J.Celada
 * Semestre 2 2020
 * ----------
 */
#include <stdio.h>
#include <math.h>

//(const int N = 1 << 20;
const int N = 100;

// Grid-stride kernel: x[i] = sqrt(pi^i) for every i in [0, n).
// A null pointer with n == 0 is a valid "dummy" launch (the loop never runs).
__global__ void kernel(float *x, int n)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
        x[i] = sqrt(pow(3.14159,i));
    }
}

int main()
{
    const int num_streams = 2;
    cudaStream_t streams[num_streams];
    float *h_data[num_streams], *d_data[num_streams];

    for (int i = 0; i < num_streams; i++) {
        cudaStreamCreate(&streams[i]);
        h_data[i] = (float *)malloc(N*sizeof(float));
        cudaMalloc(&d_data[i], N * sizeof(float));

        // launch one worker kernel per stream
        kernel<<<1, 64, 0, streams[i]>>>(d_data[i], N);
        // NOTE: h_data[i] comes from malloc (pageable), so this copy is not
        // truly asynchronous; use cudaMallocHost for real overlap.
        cudaMemcpyAsync(h_data[i], d_data[i], N*sizeof(float),
                        cudaMemcpyDeviceToHost, streams[i]);

        // launch a dummy kernel on the default stream (no work: n == 0)
        kernel<<<1, 1>>>(0, 0);
    }

    // BUG FIX: the original read h_data[0] immediately, racing against the
    // asynchronous device-to-host copies. Wait for all streams first.
    cudaDeviceSynchronize();

    for(int i = 0; i < N; i++)
        printf("Value %d is: %f\n", i, h_data[0][i]);

    // BUG FIX: release per-stream resources (the original leaked the
    // streams, the device buffers, and the host buffers).
    for (int i = 0; i < num_streams; i++) {
        cudaStreamDestroy(streams[i]);
        cudaFree(d_data[i]);
        free(h_data[i]);
    }

    cudaDeviceReset();
    return 0;
}
11,931
#include "includes.h"

// Naive normalized cross-correlation (NCC):
//   r = sum((I - mean(I)) * (T - mean(T)))
//       / sqrt(sum((I - mean(I))^2) * sum((T - mean(T))^2))
// evaluated with the template centred on every image pixel; image reads are
// clamped at the borders. One thread per output pixel (2-D launch).
// Note: `template_height` is unused (the loops run over the half-extents).
__global__ void naive_normalized_cross_correlation(
    float* d_response,
    unsigned char* d_original,
    unsigned char* d_template,
    int num_pixels_y,
    int num_pixels_x,
    int template_half_height,
    int template_height,
    int template_half_width,
    int template_width,
    int template_size,
    float template_mean
)
{
    int ny = num_pixels_y;
    int nx = num_pixels_x;
    int knx = template_width;
    int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x, (blockIdx.y * blockDim.y) + threadIdx.y);
    int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;

    if (image_index_2d.x < nx && image_index_2d.y < ny)
    {
        //
        // compute image mean over the (border-clamped) template window
        //
        float image_sum = 0.0f;

        for (int y = -template_half_height; y <= template_half_height; y++)
        {
            for (int x = -template_half_width; x <= template_half_width; x++)
            {
                int2 image_offset_index_2d = make_int2(image_index_2d.x + x, image_index_2d.y + y);
                int2 image_offset_index_2d_clamped = make_int2(min(nx - 1, max(0, image_offset_index_2d.x)), min(ny - 1, max(0, image_offset_index_2d.y)));
                int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;

                unsigned char image_offset_value = d_original[image_offset_index_1d_clamped];

                image_sum += (float)image_offset_value;
            }
        }

        float image_mean = image_sum / (float)template_size;

        //
        // compute the three sums of the NCC formula
        //
        float sum_of_image_template_diff_products = 0.0f;
        float sum_of_squared_image_diffs = 0.0f;
        float sum_of_squared_template_diffs = 0.0f;

        for (int y = -template_half_height; y <= template_half_height; y++)
        {
            for (int x = -template_half_width; x <= template_half_width; x++)
            {
                int2 image_offset_index_2d = make_int2(image_index_2d.x + x, image_index_2d.y + y);
                int2 image_offset_index_2d_clamped = make_int2(min(nx - 1, max(0, image_offset_index_2d.x)), min(ny - 1, max(0, image_offset_index_2d.y)));
                int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;

                unsigned char image_offset_value = d_original[image_offset_index_1d_clamped];
                float image_diff = (float)image_offset_value - image_mean;

                int2 template_index_2d = make_int2(x + template_half_width, y + template_half_height);
                int template_index_1d = (knx * template_index_2d.y) + template_index_2d.x;

                unsigned char template_value = d_template[template_index_1d];
                float template_diff = template_value - template_mean;

                // BUG FIX: the original multiplied the *raw* pixel value
                // (image_offset_value) by template_diff, contradicting both
                // the variable names and the NCC numerator, which requires
                // the mean-subtracted image value.
                float image_template_diff_product = image_diff * template_diff;
                float squared_image_diff = image_diff * image_diff;
                float squared_template_diff = template_diff * template_diff;

                sum_of_image_template_diff_products += image_template_diff_product;
                sum_of_squared_image_diffs += squared_image_diff;
                sum_of_squared_template_diffs += squared_template_diff;
            }
        }

        //
        // compute final result (0 where either variance vanishes, to avoid
        // dividing by zero on flat regions)
        //
        float result_value = 0.0f;

        if (sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0)
        {
            // sqrtf: keep the computation in single precision
            result_value = sum_of_image_template_diff_products / sqrtf(sum_of_squared_image_diffs * sum_of_squared_template_diffs);
        }

        d_response[image_index_1d] = result_value;
    }
}
11,932
#include "includes.h"

// For every masked node of a width x height lattice, walks its CSR-style
// neighbour list (neighbor_start[nid] .. neighbor_start[nid+1] index into
// `neighbor`) and accumulates the neighbours' weights, recording the running
// prefix sum per edge and the total per node.
// The walk is truncated to HUB_THREASHOLD+1 edges (HUB_THREASHOLD is an
// out-degree cap defined outside this file — see includes.h).
// Launch: 2-D grid covering width x height, one thread per node.
__global__ void find_all_sums_kernel(bool *mask, double *node_weight, int *neighbor, int *neighbor_start, double *neighbor_accum_weight_result, double *sum_weight_result, int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int nid = y * width + x; // thread_index is node id
    if (x < width && y < height && mask[nid]){
        double sum = 0.0;
        int end = min(neighbor_start[nid+1], neighbor_start[nid]+HUB_THREASHOLD+1); //+1 because HUB_THREASHOLD is out degree
        for (int eid = neighbor_start[nid]; eid < end; eid++) { // this eid is just index of the neighbor in the neighbor array
            sum += node_weight[neighbor[eid]];
            // running (inclusive) prefix sum of the neighbour weights
            neighbor_accum_weight_result[eid] = sum;
        }
        sum_weight_result[nid] = sum;
    }
}
11,933
#include <iostream>
#include <cstring>
using std::cerr;
using std::endl;

// Error handling macro.
// Fixes vs. the original: capture the call's OWN return code instead of
// querying cudaGetLastError() afterwards (the 'if' already consumed the
// status, so the old macro reported whatever error happened to be pending),
// and wrap in do/while(0) so the macro is a single statement (safe after a
// bare 'if' without braces).
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            cerr << "CUDA error calling \""#call"\", code is " << err_       \
                 << endl;                                                    \
        }                                                                    \
    } while (0)

#include<stdio.h>

// Demo struct with a nested pointer: both the struct and the string it points
// to must live in managed memory for the (commented-out) kernel to read them.
struct dataElem {
    int val;
    char* name;
};

__global__ void kernal(dataElem* e){
    printf("From the device %s\n", e->name);
};

int main() {
    dataElem* e;
    CUDA_CHECK(cudaMallocManaged((void**)&e, sizeof(dataElem)));
    e->val = 10;
    // The nested allocation must be checked too (was unchecked before).
    CUDA_CHECK(cudaMallocManaged((void**)&(e->name), sizeof(char)*(strlen("hello")+1)));
    strcpy(e->name, "hello");
    printf("From the host %s\n", e->name);
    // kernal<<<1,1>>>(e);
    // cudaDeviceSynchronize();

    // Release managed memory (inner allocation first) -- was leaked before.
    CUDA_CHECK(cudaFree(e->name));
    CUDA_CHECK(cudaFree(e));
    return 0;
}
11,934
#include <iostream>

// Doubles every element of a row x col matrix stored in pitched device memory.
// Device rows may be padded to 'pitch' bytes, so a row is addressed as
// base + i * pitch (byte arithmetic), never i * col * sizeof(float).
// Launch expectation: a single block whose (x, y) dims cover (row, col).
__global__ void twice(float *array, size_t pitch, size_t row, size_t col) {
    int i = threadIdx.x; // row index
    int j = threadIdx.y; // column index
    if (!(i < row && j < col)) {
        return;
    }
    float *array_row = reinterpret_cast<float *>(reinterpret_cast<char *>(array) + i * pitch);
    // 2.0f literal keeps the arithmetic in single precision.
    array_row[j] = array_row[j] * 2.0f;
}

int main() {
    const int row = 16;
    const int col = 16;
    float *array = new float[row * col];

    // Host matrix is dense row-major: element (i, j) lives at i * col + j.
    // (The original indexed with i * row + j, which only worked because
    // row == col; fixed so the stride is correct for non-square sizes.)
    for (auto i = 0; i < row; ++i) {
        for (auto j = 0; j < col; ++j) {
            array[i * col + j] = 1;
        }
    }

    size_t pitch = 0;
    float *gpu_array = nullptr;
    // cudaMallocPitch pads each row to an aligned pitch (>= col * sizeof(float)).
    cudaMallocPitch(&gpu_array, &pitch, col * sizeof(float), row);
    std::cout << "pitch: " << pitch << std::endl;

    // 2D copy handles the differing host (dense) and device (pitched) strides.
    cudaMemcpy2D(gpu_array, pitch, array, col * sizeof(float),
                 col * sizeof(float), row, cudaMemcpyHostToDevice);

    dim3 block_dim(16, 16);
    twice<<<1, block_dim>>>(gpu_array, pitch, row, col);

    // Clear the host copy so a failed copy-back cannot masquerade as success.
    for (auto i = 0; i < row; ++i) {
        for (auto j = 0; j < col; ++j) {
            array[i * col + j] = 0.0;
        }
    }

    cudaMemcpy2D(array, col * sizeof(float), gpu_array, pitch,
                 col * sizeof(float), row, cudaMemcpyDeviceToHost);

    // Verify every element was doubled from 1.0 to 2.0 (with float tolerance).
    bool ok = true;
    for (auto i = 0; i < row; ++i) {
        for (auto j = 0; j < col; ++j) {
            std::cout << "array[" << i << "][" << j << "] = " << array[i * col + j] << std::endl;
            if (!(array[i * col + j] > 2.0f - 0.001f && array[i * col + j] < 2.0f + 0.001f)) {
                std::cout << "array[" << i << "][" << j << "] = " << array[i * col + j] << std::endl;
                ok = false;
                break;
            }
        }
        if (!ok) break;
    }
    if (ok) {
        std::cout << "ok" << std::endl;
    } else {
        std::cout << "wrong" << std::endl;
    }

    cudaFree(gpu_array);
    delete[] array;
    return 0;
}
11,935
#include "includes.h"

const int listLength = 700;

// Element-wise map: d_out[i] = (d_in[i] / (d_in[i] - 2.3))^3 for every
// in-range element. threads_num is the logical element count; excess
// threads in the tail block do nothing.
__global__ void squareKernel(float* d_in, float *d_out, int threads_num) {
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x; // global id
    if (gid >= threads_num)
        return;
    // Same arithmetic as the original: the quotient is evaluated in double
    // (2.3 literal), then narrowed to float for powf.
    const float ratio = d_in[gid] / (d_in[gid] - 2.3);
    d_out[gid] = powf(ratio, 3);
}
11,936
#include <stdio.h>

// Reference???
const int N = 1024;
const int blocksize = 16;

// Fills c with rf * sin((i+1)*(j+1)*pi/(N+1)) over an N x N grid.
// Note: a and b are copied to the device but not read by the computation;
// the parameters are kept so existing callers/launches keep compiling.
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = i + j*N;
    if ( i < N && j < N )
        c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}

int main(void){
    float *a = new float[N*N];
    float *b = new float[N*N];
    float *c = new float[N*N];
    float two=2.0f, one=1.0f;
    float pi,rkplus1,rf;

    // Generate square orthonormal matrices
    pi = two * asin(one);
    rkplus1 = one/(float(N) + one);
    rf = sqrt(two*rkplus1);

    for ( int i = 0; i < N*N; ++i ) {
        a[i] = 1.0f;
        b[i] = 3.5f;
    }

    float *ad, *bd, *cd;
    const int size = N*N*sizeof(float);
    cudaMalloc( (void**)&ad, size );
    cudaMalloc( (void**)&bd, size );
    cudaMalloc( (void**)&cd, size );

    cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice ); // COPY DATA TO GPU
    cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );

    dim3 dimBlock( blocksize, blocksize );
    // Ceil-division so the grid also covers N not divisible by blocksize;
    // the in-kernel bounds check discards the excess threads.
    dim3 dimGrid( (N + dimBlock.x - 1)/dimBlock.x, (N + dimBlock.y - 1)/dimBlock.y );
    add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );

    for (int i = 0; i < 10;i++) {
        printf(" %7.5f", c[i]);
    }
    printf("\n");

    cudaFree( ad ); cudaFree( bd ); cudaFree( cd ); // CLEAN UP, RETURN
    // Host buffers were allocated with new[] and leaked in the original.
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
11,937
/*
 * vadd.cu: element-wise vector addition demo with host-side verification.
 */
#include <stdio.h>
#include <sys/time.h>
#include <cuda_runtime.h>

enum { NELEMS = 1024 * 1024 };

// c[i] = a[i] + b[i] for i < n; one element per thread with a tail guard.
__global__ void vadd(const float *a, const float *b, float *c, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main()
{
    size_t size = sizeof(float) * NELEMS;

    /* Allocate vectors on host */
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "Allocation error.\n");
        exit(EXIT_FAILURE);
    }
    // Inputs in [0, 1]; rand() is unseeded, so runs are deterministic.
    for (int i = 0; i < NELEMS; ++i) {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    /* Allocate vectors on device */
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;
    if (cudaMalloc((void **)&d_A, size) != cudaSuccess) {
        fprintf(stderr, "Allocation error\n");
        exit(EXIT_FAILURE);
    }
    if (cudaMalloc((void **)&d_B, size) != cudaSuccess) {
        fprintf(stderr, "Allocation error\n");
        exit(EXIT_FAILURE);
    }
    if (cudaMalloc((void **)&d_C, size) != cudaSuccess) {
        fprintf(stderr, "Allocation error\n");
        exit(EXIT_FAILURE);
    }

    /* Copy the host vectors to device */
    if (cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Host to device copying failed\n");
        exit(EXIT_FAILURE);
    }
    if (cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Host to device copying failed\n");
        exit(EXIT_FAILURE);
    }

    /* Launch the kernel: ceil-division grid so every element is covered. */
    int threadsPerBlock = 256;
    int blocksPerGrid =(NELEMS + threadsPerBlock - 1) / threadsPerBlock;
    vadd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, NELEMS);
    // NOTE(review): cudaGetLastError only catches launch-config errors here;
    // asynchronous execution errors surface at the blocking memcpy below.
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "Failed to launch kernel!\n");
        exit(EXIT_FAILURE);
    }

    /* Copy the device vectors to host (blocking, so it also syncs the kernel) */
    if (cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "Device to host copying failed\n");
        exit(EXIT_FAILURE);
    }

    // Verify against the host-computed sum with an absolute tolerance.
    for (int i = 0; i < NELEMS; ++i) {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }

    // NOTE(review): the exit() error paths above leak the allocations; the
    // process is exiting anyway, so this is benign for a demo.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    cudaDeviceReset();
    return 0;
}
11,938
#include "includes.h"

// Converts a pitched single-channel float image into an RGBA8 "jet"-style
// colour map. Values are rescaled from [lowerLim, upperLim] to [0, 1] and
// mapped blue -> cyan -> yellow -> red; non-finite pixels render white.
// Launch expectation: a 2D grid/block covering (width, height); in_image rows
// are 'pitch' bytes apart, out_image is dense row-major.
__global__ void convertPitchedFloatToRGBA_kernel(uchar4 *out_image, const float *in_image, int width, int height, int pitch, float lowerLim, float upperLim)
{
    const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
    const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;

    uchar4 temp;
    if (x < width && y < height)
    {
        // Pitched read: step y rows of 'pitch' bytes, then index column x.
        float val = *((float *)((char *)in_image + y * pitch) + x);

        // first draw unmatched (non-finite) pixels in white
        if (!isfinite(val)) {
            temp.x = 255; temp.y = 255; temp.z = 255; temp.w = 255;
        } else {
            // rescale value from [lowerLim,upperLim] to [0,1]
            val -= lowerLim;
            val /= (upperLim - lowerLim);

            // Piecewise-linear colour ramp. All literals carry the 'f'
            // suffix so the arithmetic stays in single precision (the
            // original's 1.0/255.0 literals silently promoted to double).
            float r = 1.0f;
            float g = 1.0f;
            float b = 1.0f;
            if (val < 0.25f) {
                r = 0;
                g = 4.0f * val;
            } else if (val < 0.5f) {
                r = 0;
                b = 1.0f + 4.0f * (0.25f - val);
            } else if (val < 0.75f) {
                r = 4.0f * (val - 0.5f);
                b = 0;
            } else {
                g = 1.0f + 4.0f * (0.75f - val);
                b = 0;
            }
            temp.x = 255.0f * r;
            temp.y = 255.0f * g;
            temp.z = 255.0f * b;
            temp.w = 255;
        }
        out_image[__mul24(y, width) + x] = temp;
    }
}
11,939
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

// matrix multiplication: C = AB
// A
#define A_HEIGHT 1024
#define A_WIDTH 1024
#define A_N A_HEIGHT * A_WIDTH
// B
#define B_HEIGHT A_WIDTH
#define B_WIDTH 1024
#define B_N B_HEIGHT * B_WIDTH
// C
#define C_HEIGHT A_HEIGHT
#define C_WIDTH B_WIDTH
#define C_N C_HEIGHT * C_WIDTH

#define BLOCK_SIZE 32
#define MAX_ERR 1e-6

// One thread per output element: grid/block x maps to rows, y to columns.
// Guards allow any grid at least as large as (d_a_height, d_b_width).
__global__ void matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width)
{
    int cid = blockIdx.y * blockDim.y + threadIdx.y; // column of C
    int rid = blockIdx.x * blockDim.x + threadIdx.x; // row of C

    if(rid < d_a_height && cid < d_b_width){
        // Dot product of A's row 'rid' with B's column 'cid'.
        double sum = 0.0;
        for(int k = 0; k < d_a_width; k++){
            sum += d_A[rid * d_a_width + k] * d_B[d_b_width*k + cid];
        }
        d_C[rid * d_b_width + cid] = sum;
    }
}

int main(){
    double *h_A, *h_B, *h_C;
    double *d_A, *d_B, *d_C;
    double *h_ref; // host-computed reference for verification

    // Allocate host memory
    h_A = (double*)malloc(sizeof(double) * A_N);
    h_B = (double*)malloc(sizeof(double) * B_N);
    h_C = (double*)malloc(sizeof(double) * C_N);
    h_ref = (double*)malloc(sizeof(double) * C_N);

    // Initialize host arrays with uniform random values in [0, 1]
    srand((unsigned int)time(NULL));
    for (int i = 0; i< A_N; i++){
        h_A[i] = (double)rand()/(double)(RAND_MAX);
    }
    for (int i = 0; i< B_N; i++){
        h_B[i] = (double)rand()/(double)(RAND_MAX);
    }

    // Allocate device memory
    cudaMalloc((void**)&d_A, sizeof(double) * A_N);
    cudaMalloc((void**)&d_B, sizeof(double) * B_N);
    cudaMalloc((void**)&d_C, sizeof(double) * C_N);

    // Transfer data from host to device memory
    cudaMemcpy(d_A, h_A, sizeof(double) * A_N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(double) * B_N, cudaMemcpyHostToDevice);

    // Executing kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Note C_mat row maps to x dimension, and col maps to y dimension.
    // Ceil-division grid (the original's "/BLOCK_SIZE + 1" launched a whole
    // extra block row/column whenever the size divided evenly).
    dim3 dimGrid((C_HEIGHT + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (C_WIDTH + BLOCK_SIZE - 1) / BLOCK_SIZE);
    matrix_mul<<<dimGrid,dimBlock>>>(d_C, d_A, d_B, A_HEIGHT, A_WIDTH, B_WIDTH);

    // Transfer data back to host memory (blocking; also syncs the kernel)
    cudaMemcpy(h_C, d_C, sizeof(double) * C_N, cudaMemcpyDeviceToHost);

    // Verification against a straightforward triple-loop CPU reference
    for(int i = 0; i < C_HEIGHT; i++){
        for(int j = 0; j < C_WIDTH; j++){
            double sum = 0.0;
            for(int k = 0; k < A_WIDTH; k++){
                sum += h_A[i*A_WIDTH+k] * h_B[k*B_WIDTH + j];
            }
            h_ref[i * C_WIDTH + j] = sum;
            assert(fabs(h_ref[i*C_WIDTH + j] - h_C[i * C_WIDTH + j]) < MAX_ERR);
        }
    }
    printf("PASSED\n");

    // Deallocate device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Deallocate host memory (h_ref was leaked in the original)
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_ref);
    return 0;
}
11,940
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <ctime>

// Device String Comparator Function
// Lexicographic "less than" between two suffixes of the genome, where x and y
// are suffix start offsets into 'gene' (a NUL-terminated sequence of
// unsigned longs). A suffix that is a proper prefix of the other compares
// smaller. NOTE(review): if two compared suffixes were ever byte-identical
// and unterminated, the loop would run off the array -- the trailing NUL is
// what guarantees termination; confirm callers always NUL-terminate.
template <typename T>
struct greater_functor
{
    thrust::device_ptr<unsigned long> gene;
    // Scratch cursors; the functor is copied per-invocation by thrust, so
    // these members are not shared between threads.
    unsigned long i,j;

    greater_functor(thrust::device_ptr<unsigned long> _gene) : gene(_gene) {}

    __device__ int operator()( T x, T y){
        for(i = x,j=y;; j++,i++)
        {
            if(gene[i] == (unsigned long)'\0') return(0<1);  // x ended first -> x < y
            if (gene[j]== (unsigned long)'\0') return(1<0);  // y ended first -> x >= y
            if(gene[i] != gene[j]) return(gene[i] < gene[j]);
        }
    }
};

// Allocate space on device and copy genome onto it
// call Thrust::stable_sort function with our custom comparator.
// A holds suffix start indices (0..N-1) and is sorted in place.
void sort_Suffixes(unsigned long* gene, thrust::device_vector<unsigned long>& A ,unsigned long N){
    unsigned long *dgene;
    cudaMalloc((void **) &dgene, (N+1) * sizeof(unsigned long));
    // N+1 elements so the terminating NUL travels with the data.
    cudaMemcpy(dgene,gene, (N+1) * sizeof(unsigned long), cudaMemcpyHostToDevice);
    thrust::device_ptr<unsigned long> dev_ptr(dgene);
    thrust::stable_sort(A.begin(),A.end(),greater_functor<unsigned long>(dev_ptr));
    cudaFree(dgene);
}

// Debug helper: prints each suffix index and the suffix text it denotes.
// Each list[i] access is a device->host transfer, so this is slow by design.
void print_suffix_list(thrust::device_vector<unsigned long>& list, unsigned long len, char* genome){
    int i=0;
    for(i=0; i<len; i++){
        printf("%ld: %s\n", (unsigned long)list[i], genome+(unsigned long)list[i]);
    }
}

// Reads up to 'num' bytes of the genome file into 'buffer' and NUL-terminates.
// NOTE(review): fopen/fread results are unchecked -- a missing file crashes.
void read_genome2(char *filename, char *buffer, unsigned long num){
    FILE *fh;
    fh = fopen(filename, "r");
    fread(buffer, 1, num, fh);
    buffer[num] = '\0';
    fclose(fh);
}

// Allocates the genome buffer, loads the file, and returns the string length
// actually read (may be < num for short files).
unsigned long setup(unsigned long num, char* filename, char** genome){
    *genome = (char *) malloc((num+1)*sizeof(char));
    read_genome2(filename, *genome, num);
    return (strlen(*genome));
}

// argv: [1] initial suffix count, [2] increment per round, [3] max count,
//       [4] genome filename. Times the GPU suffix sort for growing inputs.
int main(int argc, char* argv[])
{
    if(argc < 5){
        printf("Less Arguments!! \n");
        return 0;
    }
    unsigned long count = atol(argv[1]);
    unsigned long increaseSize = atol(argv[2]);
    unsigned long maxSize = atol(argv[3]);

    while(count <= maxSize){
        char * genome;
        unsigned long N = setup(count,argv[4], &genome);
        unsigned long i = 0;
        // Widen each genome byte to unsigned long for the device comparator.
        // NOTE(review): genome2 is never freed -- leaks once per loop round.
        unsigned long * genome2;
        genome2 =(unsigned long *)malloc(N*sizeof(unsigned long));
        for(i=0;i<N;i++){
            genome2[i] = (unsigned long)genome[i];
        }
        free(genome);

        // A = [0, 1, ..., count-1]: the suffix start offsets to be sorted.
        thrust::device_vector<unsigned long> A(count);
        thrust::sequence(A.begin(),A.end());

        clock_t start, end;
        double runTime;
        start = clock();
        try
        {
            sort_Suffixes(genome2, A,N);
        }
        // NOTE(review): catching by value slices/copies; prefer
        // catch(const thrust::system_error& e).
        catch(thrust::system_error e) // Terminate Gracefully
        {
            std::cerr << "Error inside sort: " << e.what() << std::endl;
        }
        end = clock();
        runTime = (end - start) / (double) CLOCKS_PER_SEC ;
        printf("%ld %f\n",count, runTime);
        count = count + increaseSize;
    }
}
11,941
#include <stdio.h>
#include <cuda.h>

// C = A * B where A is m x n, B is n x k, C is m x k (row-major).
// One thread per C element; expects a 2D grid/block covering (k, m).
__global__ void multMats(float * A, float * B, float * C, int m, int n, int k)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    //copy final element value to the C matrix
    if (row < m && col < k){
        float elementC = 0;
        for (int i = 0; i < n; ++i) {
            // B has k columns, so row i of B starts at i*k (the original
            // used i*n, which was only correct because n == k).
            elementC += A[row*n+i]*B[i*k+col];
        }
        C[row*k+col] = elementC;
    }
}

int main(int argc, char ** argv)
{
    float *hostA;
    float *hostB;
    float *hostC;
    float *deviceA;
    float *deviceB;
    float *deviceC;

    int m = 512; // number of A rows
    int n = 512; // number of A columns (or B rows)
    int k = 512; // number of B columns

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    //allocate data in host
    hostA = (float *) malloc(m * n * sizeof(float));
    hostB = (float *) malloc(n * k * sizeof(float));
    hostC = (float *) malloc(m * k * sizeof(float));

    for (int i = 0; i < m*n; i++)//Matrix Initialization
        hostA[i]=1.0;
    for (int i = 0; i < n*k; i++)
        hostB[i]=1.0;

    //allocate data in device
    cudaMalloc((void **) &deviceA, m * n * sizeof(float));
    cudaMalloc((void **) &deviceB, n * k * sizeof(float));
    cudaMalloc((void **) &deviceC, m * k * sizeof(float));

    //copy inputs to device
    cudaMemcpy(deviceA, hostA, m * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB, n * k * sizeof(float), cudaMemcpyHostToDevice);

    //device kernel
    // The kernel indexes with blockIdx.y/threadIdx.y, so it needs a 2D
    // launch. The original launched a 1D grid of 1D blocks, which left
    // row == 0 for every thread and computed only the first row of C.
    dim3 block(32, 32);
    dim3 grid((k + block.x - 1) / block.x, (m + block.y - 1) / block.y);
    int threadsPerBlock = block.x * block.y;
    int n_blocks = grid.x * grid.y;
    printf("CUDA kernel launch with %d blocks of %d threads\n", n_blocks, threadsPerBlock);
    printf("Inactive Threads %d \n", (n_blocks*threadsPerBlock)-(m*k));

    cudaEventRecord(start);
    multMats<<<grid, block>>>(deviceA, deviceB, deviceC, m, n, k);
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize replaces it.
    cudaDeviceSynchronize();
    cudaEventRecord(stop);

    //copy result back to host
    cudaMemcpy(hostC, deviceC, m * k * sizeof(float), cudaMemcpyDeviceToHost);

    // Blocks CPU execution until stop has been recorded
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Elapsed Time: %f milliseconds\n", milliseconds);

    // Destroying events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //deallocate device
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);

    //deallocate host
    free(hostA);
    free(hostB);
    free(hostC);
    return 0;
}
11,942
#include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Adds the constant y_0 to every element of y (one thread per element with
// a tail guard). Despite the name, no Euler stepping happens on the GPU;
// the integration is done on the CPU and this kernel only shifts the result.
__global__ void hybridGPuEuler(float *y, float y_0 ,int N)
{
    int myID = threadIdx.x + blockDim.x * blockIdx.x;
    if (myID <N) {
        y[myID] = y[myID] + y_0;
    }
}

// Forward-Euler integration of y' = 9t^2 - 4t + 5 on [0, 10] with step
// delta_t, starting from s[0] = 0. Returns a malloc'd array of n samples
// that the caller must free. The t_0 parameter is accepted but unused.
float* hybridCPUEuler(float t_0, float delta_t){
    // NOTE(review): 10/delta_t is float arithmetic truncated to int; for
    // small delta_t this can be off by one sample -- consider lroundf.
    int n=10/delta_t +1;
    float *s = (float*) malloc(sizeof(float)*n);
    s[0]=0;
    for (int i=0;i<n-1;i++){
        s[i+1]=s[i]+delta_t*(9*powf(i*delta_t,2)-4*(i*delta_t)+5);
    }
    return s;
}

int main(int argc, char const *argv[]) {
    printf("seccion 1.c\n");
    int hilos1c = 256,n1c,bloque1c;
    float delta_t1c,tiempoGPU1c;
    float *dev_e1c,*hst_y;
    clock_t startcpu1c, endcpu1c;
    cudaEvent_t startgpu1c, endgpu1c;

    // Sweep delta_t from 1e-1 down to 1e-6, timing CPU vs GPU work.
    for(int i=1;i<7;i++)
    {
        delta_t1c=powf(10,-i);
        n1c=10/delta_t1c +1;

        startcpu1c = clock();
        hst_y = hybridCPUEuler(0,delta_t1c);
        endcpu1c = clock();

        bloque1c = ceil((float) n1c /hilos1c); // ceil-division block count

        cudaEventCreate(&startgpu1c);
        cudaEventCreate(&endgpu1c);
        cudaEventRecord(startgpu1c,0);
        // NOTE(review): the GPU timing window includes cudaMalloc and the
        // host->device copy, so it measures far more than the kernel.
        cudaMalloc( (void**) &dev_e1c, n1c*sizeof(float));
        cudaMemcpy(dev_e1c,hst_y,n1c*sizeof(float),cudaMemcpyHostToDevice);
        hybridGPuEuler<<<bloque1c,hilos1c>>>(dev_e1c,4,n1c);
        cudaEventRecord(endgpu1c,0);
        cudaEventSynchronize(endgpu1c);
        cudaEventElapsedTime(&tiempoGPU1c,startgpu1c,endgpu1c);
        cudaMemcpy(hst_y,dev_e1c,n1c*sizeof(float),cudaMemcpyDeviceToHost);
        cudaFree(dev_e1c);
        free(hst_y);
        cudaEventDestroy(startgpu1c);
        cudaEventDestroy(endgpu1c);
        double cpu_time_used = ((double) (endcpu1c - startcpu1c)) * 1000 / CLOCKS_PER_SEC;
        printf("tiempo en CPU: %f ms, tiempo en GPU: %f ms y el tiempo total es: %f ms\n", cpu_time_used ,tiempoGPU1c,cpu_time_used+tiempoGPU1c);
    }
    return 0;
}
11,943
#include <cuda_runtime.h>

// Structure-of-arrays view over three float streams. The const accessors
// load through __ldg (read-only data cache); the non-const ones return
// writable references.
struct View {
    float *mx, *my, *mz;
    __device__ __forceinline__ float& x(int i) { return mx[i]; }
    __device__ __forceinline__ float x(int i) const { return __ldg(mx + i); }
    __device__ __forceinline__ float& y(int i) { return my[i]; }
    __device__ __forceinline__ float y(int i) const { return __ldg(my + i); }
    __device__ __forceinline__ float& z(int i) { return mz[i]; }
    __device__ __forceinline__ float z(int i) const { return __ldg(mz + i); }
};

// Writes the pairwise component products of a single input vector:
// out.x = in.y*in.z, out.y = in.x*in.z, out.z = in.x*in.y.
// NOTE(review): despite the name this is NOT a two-vector cross product
// (no subtraction of product pairs); presumably a codegen comparison demo.
// Indexing uses blockIdx.x only, i.e. one element per block -- assumes a
// launch of n blocks; TODO confirm intended launch shape.
__global__ void cross(View const * pvi, View * pvo, int n)
{
    auto const & vi = *pvi;
    auto & vo = *pvo;
    int tid = blockIdx.x;
    if (tid >= n) return;
    vo.x(tid) = vi.y(tid)*vi.z(tid);
    vo.y(tid) = vi.x(tid)*vi.z(tid);
    vo.z(tid) = vi.x(tid)*vi.y(tid);
}

// Same computation as cross(), but loads each component once into locals
// (so the compiler need not re-load between the dependent stores).
__global__ void crossO(View const * pvi, View * pvo, int n)
{
    auto const & vi = *pvi;
    auto & vo = *pvo;
    int tid = blockIdx.x;
    if (tid >= n) return;
    auto x = vi.x(tid);
    auto y = vi.y(tid);
    auto z = vi.z(tid);
    vo.x(tid) = y*z;
    vo.y(tid) = x*z;
    vo.z(tid) = x*y;
}

// Raw-pointer variant of the same computation; __restrict__ on the const
// inputs lets the compiler use the read-only cache without __ldg.
__global__ void cross(float const * __restrict__ xi, float const * __restrict__ yi, float const * __restrict__ zi, float *xo, float *yo, float *zo, int n){
    int tid = blockIdx.x;
    if (tid >= n) return;
    xo[tid] = yi[tid]*zi[tid];
    yo[tid] = xi[tid]*zi[tid];
    zo[tid] = xi[tid]*yi[tid];
}
11,944
#include "includes.h"

// Logistic sigmoid evaluated in single precision (the original called the
// double-precision exp() on float data).
__device__ __forceinline__ float sigmoidf(float x)
{
    return 1.0f / (1.0f + expf(-x));
}

// Forward activation of an object-detection output head. Each thread (flat
// id over a 2D grid of 1D blocks) owns one batch element of
// in_size_x * in_size_y * in_size_z floats. Boxes are packed as
// [x, y, w, h, class_0..class_{max_classes-1}], stride 4 + max_classes:
// x/y and class scores get a sigmoid, w/h get exp.
// NOTE(review): max_bounding_boxes is used as the packed-buffer length (loop
// bound in elements), mirroring the original CPU reference -- confirm the
// caller passes it that way. There is no guard on 'id'; presumably the
// launch is sized exactly to the batch -- TODO confirm.
__global__ void calcDetectObjectsForwardGPU(float *in, float *out, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    int base = id * (in_size_x * in_size_y * in_size_z);

    for( int i = 0; i < max_bounding_boxes; i=i+(4+max_classes)){
        int index = base + i;
        out[index  ] = sigmoidf(in[index  ]); // x: sigmoid
        out[index+1] = sigmoidf(in[index+1]); // y: sigmoid
        out[index+2] = expf(in[index+2]);     // w: exp
        out[index+3] = expf(in[index+3]);     // h: exp
        for( int c = 0; c < max_classes; ++c){
            out[base + i+4+c] = sigmoidf(in[base + i+4+c]); // class score: sigmoid
        }
    }
}
11,945
#include <stdio.h>
#include <stdlib.h>

// One block per element: block i sums a[i] + b[i] into c[i].
__global__ void add(int *a, int *b, int *c) {
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

#define N 16

// Report any CUDA API error by message.
void check(cudaError_t err) {
    if (err != cudaSuccess)
        printf("The error is %s.\n", cudaGetErrorString(err));
}

// Print the array once on a single line. (The original wrapped this in a
// "for (row = 0; row < 16; ...)" loop that printed the identical contents
// 16 times -- clearly accidental, removed.)
void print_array(int *arr, int len) {
    for (int i = 0; i < len; i++)
        printf("%d ", arr[i]);
    printf("\n");
}

// Fill a[0..n) with small pseudo-random ints (0 or 1; rand() is unseeded,
// so output is deterministic across runs).
void random_ints(int *a, int n) {
    for (int i = 0; i < n; i++)
        a[i] = (int)(rand() / (RAND_MAX / 1.5));
}

int main(void) {
    int *a, *b, *c;             // host copies of a, b, c
    int *d_a, *d_b, *d_c;       // device copies of a, b, c
    int size = N * sizeof(int);

    // Allocate space for device copies of a, b, c
    check(cudaMalloc((void **)&d_a, size));
    check(cudaMalloc((void **)&d_b, size));
    check(cudaMalloc((void **)&d_c, size));

    // Allocate space for host copies of a, b, c and setup input values
    a = (int*)malloc(size); random_ints(a, N);
    b = (int*)malloc(size); random_ints(b, N);
    c = (int*)malloc(size);

    // Copy inputs to device
    check(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
    check(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice));

    // Launch add() kernel on GPU, one block per element
    add<<<N,1>>>(d_a, d_b, d_c);
    check(cudaGetLastError()); // catch launch-configuration errors

    // Copy result back to host (blocking, so it also syncs the kernel)
    check(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost));

    // Print results
    printf("Array a:\n");
    print_array(a, N);
    printf("Array b:\n");
    print_array(b, N);
    printf("Sum of a and b:\n");
    print_array(c, N);

    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
11,946
//Copyright (c) 2018 ETH Zurich, Lukas Cavigelli
#include <math.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
using namespace std;
typedef __half half;

// Compares each pixel of 'input' against 'inputState' across all feature
// planes; a pixel whose absolute difference exceeds diffThreshold on ANY
// plane is marked in changeMap (1x1 support: only the pixel itself).
// Optionally copies the changed pixel's planes into inputState.
// Launch expectation: 1D grid/blocks covering height*width pixels.
__global__ void changeDetection_1x1_kernel(  const half* __restrict__ input, half* __restrict__ inputState, bool* __restrict__ changeMap,
                                             const int width, const int height, const int nInputPlane,
                                             const float diffThreshold_float, const bool updateInputState) {
    // compute pixel index
    int pxlInpIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if(pxlInpIdx >= height * width)
        return;

    //check for changes at the pixel location; any feature map can trigger a change
    half diffThreshold = __float2half(diffThreshold_float);
    bool change = false;
    for (int i = 0; i < nInputPlane; ++i) {
        int idx = i*(height*width) + pxlInpIdx; // !! this expression might need the long datatype for high res
        half diff = __hsub(inputState[idx], input[idx]);
        // |diff| > threshold, expressed without __habs for wider arch support
        change |= __hgt(diff, diffThreshold) | __hlt(diff, __hneg(diffThreshold));
    }
    if(!change)
        return; // no need to mark outputs/proceed, if not changed

    // mark pixels in the support of the changed pixel for updating
    changeMap[pxlInpIdx] = true;

    //update prevInput if with copyChanges/feedback
    if(updateInputState) { // implicit (&& change), otherwise already returned
        for (int i = 0; i < nInputPlane; ++i) {
            int idx = i*(height*width) + pxlInpIdx; // !! this expression might need the long datatype for high res
            inputState[idx] = input[idx];
        }
    }
}

// General-support variant: like the 1x1 kernel, but a changed pixel marks
// the whole (2*kWHalf+1) x (2*kHHalf+1) window around it in changeMap
// (clipped at the image borders).
__global__ void changeDetection_kernel(  const half* __restrict__ input, half* __restrict__ inputState, bool* __restrict__ changeMap,
                                         const int width, const int height, const int nInputPlane, const int kHHalf, const int kWHalf,
                                         const float diffThreshold_float, const bool updateInputState) {
    // compute pixel index
    int pxlInpIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if(pxlInpIdx >= height * width)
        return;

    //check for changes at the pixel location; any feature map can trigger a change
    half diffThreshold = __float2half(diffThreshold_float);
    bool change = false;
    for (int i = 0; i < nInputPlane; ++i) {
        int idx = i*(height*width) + pxlInpIdx; // !! this expression might need the long datatype for high res
        half diff = __hsub(inputState[idx], input[idx]);
        change |= __hgt(diff, diffThreshold) | __hlt(diff, __hneg(diffThreshold));
    }
    if(!change)
        return; // no need to mark outputs/proceed, if not changed

    // mark pixels in the support of the changed pixel for updating
    int xIn = pxlInpIdx % width;
    int yIn = pxlInpIdx / width;
    for (int k = -kHHalf; k <= kHHalf; ++k) {
        int yOut = yIn + k;
        for (int l = -kWHalf; l <= kWHalf; ++l) {
            int xOut = xIn + l;
            if(yOut>=0 && yOut<height && xOut>=0 && xOut<width) {
                changeMap[yOut*width + xOut] = true;
            }
        }
    }

    //update prevInput if with copyChanges/feedback
    if(updateInputState) { // implicit (&& change), otherwise already returned
        for (int i = 0; i < nInputPlane; ++i) {
            int idx = i*(height*width) + pxlInpIdx; // !! this expression might need the long datatype for high res
            inputState[idx] = input[idx];
        }
    }
}

extern "C" {

// Host wrapper: dispatches to the 1x1 fast path when the support is a
// single pixel, otherwise to the general kernel. Grid/block dims are
// passed through from the caller (e.g. a Torch/FFI binding).
void changeDetection(int gridz, int gridy, int gridx, int blockz, int blocky, int blockx,
                     const half* __restrict__ input, half* __restrict__ oldinput, bool* __restrict__ changeMap,
                     const int width, const int height, const int nInputPlane, const int kHHalf, const int kWHalf,
                     const float diffThreshold, const bool updateInputState) {
    dim3 grid(gridx, gridy, gridz);
    dim3 block(blockx, blocky, blockz);
    if(kHHalf == 0 and kWHalf == 0) {
        changeDetection_1x1_kernel<<<grid, block>>>(input, oldinput, changeMap, width, height, nInputPlane, diffThreshold, updateInputState);
    } else {
        changeDetection_kernel<<<grid, block>>>(input, oldinput, changeMap, width, height, nInputPlane, kHHalf, kWHalf, diffThreshold, updateInputState);
    }
}

// Dilates the change map by one receptive field: output pixel is true iff
// ANY input pixel inside its (2*kWHalf+1) x (2*kHHalf+1) window changed.
__global__ void changePropagation_kernel(const bool* __restrict__ changeMatrixIn, bool* __restrict__ changeMatrixOut,
                                         const int width, const int height, const int kHHalf, const int kWHalf) {
    // compute pixel index
    int pxlInpIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if(pxlInpIdx >= height * width)
        return;

    //check if any pixel in the input range has changed
    bool change = false;
    int xOut = pxlInpIdx % width;
    int yOut = pxlInpIdx / width;
    for (int k = -kHHalf; k <= kHHalf; ++k) {
        int yIn = yOut + k;
        for (int l = -kWHalf; l <= kWHalf; ++l) {
            int xIn = xOut + l;
            if(yIn>=0 && yIn<height && xIn>=0 && xIn<width) {
                change = change || changeMatrixIn[yIn*width + xIn];
            }
        }
    }
    changeMatrixOut[yOut*width + xOut] = change;
}

// Host wrapper for changePropagation_kernel.
void changePropagation(int gridz, int gridy, int gridx, int blockz, int blocky, int blockx,
                       const bool* __restrict__ changeMatrixIn, bool* __restrict__ changeMatrixOut,
                       const int width, const int height, const int kHHalf, const int kWHalf) {
    dim3 grid(gridx, gridy, gridz);
    dim3 block(blockx, blocky, blockz);
    changePropagation_kernel<<<grid, block>>>(changeMatrixIn, changeMatrixOut, width, height, kHHalf, kWHalf);
}

// im2col-style gather restricted to changed pixels: for change entry
// 'changeIdx', extracts the kW x kH x nInputPlane patch centred on the
// changed location into 'columns' (zero-filled outside the image).
// Thread layout: x = kernel column, z = kernel row, y = change within block.
__global__ void genXMatrix_kernel( half* columns, const half* __restrict__ input, const int* __restrict__ changeList,
                                   const int kW, const int kH, const int nInputPlane, const int width, const int height, const int numChanges) {
    const int kx = threadIdx.x;
    const int ky = threadIdx.z;
    const int changeIdx = blockIdx.x * blockDim.y + threadIdx.y;
    if(changeIdx < numChanges) {
        int pos = changeList[changeIdx];
        int ix = pos % width + kx - (kW-1)/2;
        int iy = pos / width + ky - (kH-1)/2;
        half *dst = columns + changeIdx*(kW*kH*nInputPlane) + ky*kW+kx;
        const bool isInImage = ix>=0 && ix < width && iy>=0 && iy< height;
        for (int i = 0; i < nInputPlane; ++i) {
            dst[i*kH*kW] = isInImage ? input[(i*height + iy) * width + ix] : __float2half(0.0f);
        }
    }
}

// Host wrapper for genXMatrix_kernel.
void genXMatrix(int gridz, int gridy, int gridx, int blockz, int blocky, int blockx,
                half* columns, const half* __restrict__ input, const int* __restrict__ changeList,
                const int kW, const int kH, const int nInputPlane, const int width, const int height, const int numChanges) {
    dim3 grid(gridx, gridy, gridz);
    dim3 block(blockx, blocky, blockz);
    genXMatrix_kernel<<<grid, block>>>(columns, input, changeList, kW, kH, nInputPlane, width, height, numChanges);
}

// Scatters the per-change GEMM results back into the dense output tensor,
// optionally applying ReLU. columnsOut is laid out [outPlane][change].
__global__ void updateOutput_kernel(const half* __restrict__ columnsOut, half* output, const int* __restrict__ changeList,
                                    const int numOutputPixel, const int numChanges, const int nOutputPlane, const bool relu) {
    int count = blockIdx.x * blockDim.x + threadIdx.x;
    if(count < numChanges*nOutputPlane) {
        int outpPlane = count / numChanges;
        int changeNr = count % numChanges;
        int pxl = changeList[changeNr];
        half v = columnsOut[count];
        v = relu && __hle(v, __float2half(0.0f)) ? __float2half(0.0f) : v;
        output[outpPlane*numOutputPixel + pxl] = v;
    }
}

// Host wrapper for updateOutput_kernel.
void updateOutput(int gridz, int gridy, int gridx, int blockz, int blocky, int blockx,
                  half *columnsOut, half *output, int* changeList,
                  int numOutputPixel, int numChanges, int nOutputPlane, bool relu) {
    dim3 grid(gridx, gridy, gridz);
    dim3 block(blockx, blocky, blockz);
    updateOutput_kernel<<<grid, block>>>(columnsOut, output, changeList, numOutputPixel, numChanges, nOutputPlane, relu);
}

// Recomputes the max-pool output cell covering each changed input pixel:
// one thread per change, looping over channels and the stride window.
__global__ void maxPool2d_kernel(const half* __restrict__ input, half* __restrict__ output, const int* __restrict__ changeIndexes,
                                 const int numChanges, const int numCh, const int iheight, const int iwidth,
                                 const int oheight, const int owidth, const int stridey, const int stridex) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= numChanges)
        return;
    int pxIdx = changeIndexes[tid];
    int y = pxIdx / iwidth, x = pxIdx % iwidth;
    int yo = y / stridey, xo = x / stridex; // pooled cell containing (y, x)
    for(int ch = 0; ch < numCh; ch++) {
        half v = __float2half(-INFINITY);
        for(int j = 0; j < stridey; j++) {
            for(int i = 0; i < stridex; i++) {
                int yi = yo*stridey + j, xi = xo*stridex + i;
                if(yi < iheight && xi < iwidth) {
                    half iVal = input[(ch*iheight + yi)*iwidth + xi];
                    if (__hgt(iVal, v))
                        v = iVal;
                }
            }
        }
        output[(ch*oheight + yo)*owidth + xo] = v;
    }
}

// Host wrapper for maxPool2d_kernel (1D launch sized by the caller).
void maxPool2d(int gridx, int blockx,
               half *input, half *output, int* changeIndexes,
               int numChanges, int numCh, int iheight, int iwidth, int oheight, int owidth, int stridey, int stridex) {
    //determine grid and block size based on number of changes
    dim3 grid(gridx);
    dim3 block(blockx);
    maxPool2d_kernel<<<grid, block>>>(input, output, changeIndexes, numChanges, numCh, iheight, iwidth, oheight, owidth, stridey, stridex);
}

}
11,947
/* Copyright 2011 Russel Steinbach, Jeffrey Blanchard, Bradley Gordon,
 * and Toluwaloju Alabi
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/sort.h>
#include <thrust/transform_reduce.h>

// Bucket-based k-th order-statistic selection on the GPU.
namespace BucketSelect {
using namespace std;

#define MAX_THREADS_PER_BLOCK 1024
#define CUTOFF_POINT 200000
#define NUM_PIVOTS 17

#define CUDA_CALL_BS(x) \
  do { \
    if ((x) != cudaSuccess) { \
      printf("Error at %s:%d\n", __FILE__, __LINE__); \
      return EXIT_FAILURE; \
    } \
  } while (0)

// NOTE(review): file-scope 'time' shadows ::time from <ctime>; rename if
// this unit ever needs the C library function.
cudaEvent_t start, stop;
float time;

// selection == 0 starts a timing interval; anything else stops it and
// prints the elapsed milliseconds tagged with 'ind'.
void timing(int selection, int ind) {
  if (selection == 0) {
    //****//
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //****//
  } else {
    //****//
    cudaThreadSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Time %d: %lf \n", ind, time);
    //****//
  }
}

// Frees the scratch buffers used by one selection pass.
template <typename T>
void cleanup(uint *h_c, T *d_k, int *etb, uint *bc) {
  free(h_c);
  cudaFree(d_k);
  cudaFree(etb);
  cudaFree(bc);
}

// This function initializes a vector to all zeros on the host (CPU)
// (actually zeroes DEVICE memory via cudaMemset -- the comment predates the
// implementation).
void setToAllZero(uint *deviceVector, int length) {
  cudaMemset(deviceVector, 0, length * sizeof(uint));
}

// this function assigns elements to buckets
// bucketIndex = (value - minimum) * slope, clamped to the last bucket;
// per-block counts are accumulated in shared memory, then atomically added
// to the global bucketCount.
// NOTE(review): the zeroing loop runs bucketNumbers/1024 times, so when
// bucketNumbers < 1024 the shared counters are never zeroed; also the outer
// grid-stride loop wraps an inner loop that already covers the full range
// with stride 'offset', so work (and atomicInc counts) repeat whenever
// stride < length -- presumably callers guarantee offset == stride >= length
// per pass; TODO confirm against the calling code.
template <typename T>
__global__ void assignBucket(T * d_vector, int length, int bucketNumbers, double slope, double minimum, int * bucket, uint * bucketCount, int offset) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  int bucketIndex;
  extern __shared__ uint sharedBuckets[];
  int index = threadIdx.x;

  // variables in shared memory for fast access
  __shared__ int sbucketNums;
  __shared__ double sMin;
  sbucketNums = bucketNumbers;
  sMin = minimum;

  // reading bucket counts into shared memory where increments will be performed
  for (int i = 0; i < (bucketNumbers / 1024); i++)
    if (index < bucketNumbers)
      sharedBuckets[i * 1024 + index] = 0;
  __syncthreads();

  // assigning elements to buckets and incrementing the bucket counts
  for (int I = idx; I < length; I += stride) {
    int i;
    for (i = idx; i < length; i += offset) {
      // calculate the bucketIndex for each element
      bucketIndex = (d_vector[i] - sMin) * slope;
      // if it goes beyond the number of buckets, put it in the last bucket
      if (bucketIndex >= sbucketNums) {
        bucketIndex = sbucketNums - 1;
      }
      bucket[i] = bucketIndex;
      atomicInc(&sharedBuckets[bucketIndex], length);
    }
  }
  __syncthreads();

  // reading bucket counts from shared memory back to global memory
  for (int i = 0; i < (bucketNumbers / 1024); i++)
    if (index < bucketNumbers)
      atomicAdd(&bucketCount[i * 1024 + index], sharedBuckets[i * 1024 + index]);
}

// this function reassigns elements to buckets
// Elements outside the previously-selected Kbucket are parked in a sentinel
// bucket (bucketNumbers + 1); elements inside it are re-binned against the
// refined [minimum, maximum] range.
template <typename T>
__global__ void reassignBucket(T * d_vector, int * bucket, uint * bucketCount, const int bucketNumbers, const int length, const double slope, const double maximum, const double minimum, int offset, int Kbucket) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  extern __shared__ uint sharedBuckets[];
  int index = threadIdx.x;
  int bucketIndex;

  // reading bucket counts to shared memory where increments will be performed
  if (index < bucketNumbers) {
    sharedBuckets[index] = 0;
  }
  __syncthreads();

  // assigning elements to buckets and incrementing the bucket counts
  for (int I = idx; I < length; I += stride) {
    int i;
    for (i = idx; i < length; i += offset) {
      if (bucket[i] != Kbucket) {
        bucket[i] = bucketNumbers + 1;
      } else {
        // calculate the bucketIndex for each element
        bucketIndex = (d_vector[i] - minimum) * slope;
        // if it goes beyond the number of buckets, put it in the last bucket
        if (bucketIndex >= bucketNumbers) {
          bucketIndex = bucketNumbers - 1;
        }
        bucket[i] = bucketIndex;
        atomicInc(&sharedBuckets[bucketIndex], length);
      }
    }
  }
  __syncthreads();

  // reading bucket counts from shared memory back to global memory
  if (index < bucketNumbers) {
    atomicAdd(&bucketCount[index], sharedBuckets[index]);
  }
}

// copy elements in the kth bucket to a new array
// 'count' is a device counter used with atomicInc to claim output slots.
template <typename T>
__global__ void copyElement( T *d_vector, int length, int *elementToBucket, int bucket, T *newArray, uint *count, int offset) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;

  for (int I = idx; I < length; I += stride) {
    for (int i = idx; i < length; i += offset)
      // copy elements in the kth bucket to the new array
      if (elementToBucket[i] == bucket)
        newArray[atomicInc(count, length)] = d_vector[i];
  }
}

// this function finds the bin containing the kth element we are looking for (works on the host)
// Walks the prefix sum of bucket counts until it reaches k; copies the full
// count array from the device only when the first bucket does not suffice.
inline int FindKBucket(uint *d_counter, uint *h_counter, const int numBuckets, const int k, uint *sum) {
  cudaMemcpy(sum, d_counter, sizeof(uint), cudaMemcpyDeviceToHost);
  int Kbucket = 0;
  if (*sum < k) {
    cudaMemcpy(h_counter, d_counter, numBuckets * sizeof(uint), cudaMemcpyDeviceToHost);
    while ((*sum < k) & (Kbucket < numBuckets - 1)) {
      Kbucket++;
      *sum += h_counter[Kbucket];
    }
  } else {
    cudaMemcpy(h_counter, d_counter, sizeof(uint), cudaMemcpyDeviceToHost);
  }
  return Kbucket;
}

/*
//this function finds the bin containing the kth element we are looking for (works on the host)
inline int FindSmartKBucket(uint *d_counter, uint *h_counter, const int num_buckets, int k, uint * sum){
  cudaMemcpy(sum, d_counter, sizeof(uint), cudaMemcpyDeviceToHost);
  int Kbucket = 0;
  int warp_size = 32;
  if (*sum<k){
    while ( (*sum<k) & (Kbucket<num_buckets-1)) {
      Kbucket++;
      if (!((Kbucket-1)%32))
        cudaMemcpy(h_counter + Kbucket, d_counter + Kbucket, warp_size * sizeof(uint), cudaMemcpyDeviceToHost);
      *sum += h_counter[Kbucket];
    }
  }
  else{
    cudaMemcpy(h_counter, d_counter, sizeof(uint), cudaMemcpyDeviceToHost);
  }
  return Kbucket;
}
*/

// Writes into Kvalue[0] some element that falls in Kbucket (used once the
// bucket has been narrowed to a single value; last writer wins otherwise).
template <typename T>
__global__ void GetKvalue(T *d_vector, int *d_bucket, const int Kbucket, const int n, T *Kvalue, int offset) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;

  for (int I = idx; I < n; I += stride) {
    int i;
    for (i = idx; i < n; i += offset) {
      if (d_bucket[i] == Kbucket)
        Kvalue[0] = d_vector[i];
    }
  }
}

/************************************************************************/
/************************************************************************/
// THIS IS THE PHASE TWO FUNCTION WHICH WILL BE CALLED IF THE INPUT
// LENGTH IS LESS THAN THE CUTOFF OF 2MILLION 200 THOUSAND
/************************************************************************/
// (definition continues beyond this chunk of the file)
template <typename T>
T phaseTwo(T *d_vector, int length, int K, int blocks, int threads, double maxValue = 0, double minValue = 0) {
  // declaring and initializing variables for kernel launches
  int threadsPerBlock = threads;
  int numBlocks = blocks;
  int numBuckets = 1024;
  int offset = blocks * threads;
  uint sum = 0, Kbucket = 0, iter = 0;
  int Kbucket_count = 0;

  // initializing variables for kernel launches
  if (length < 1024) {
    numBlocks = 1;
  }

  // variable to store the end result
  T kthValue = 0;

  // declaring and initializing other variables
  size_t size = length * sizeof(int);
  size_t totalBucketSize = numBuckets * sizeof(uint);

  // allocate memory to store bucket assignments and to count elements in buckets
  int * elementToBucket;
  uint *d_bucketCount;
  cudaMalloc(&elementToBucket, size);
  cudaMalloc(&d_bucketCount, totalBucketSize);
  uint *h_bucketCount = (uint *)malloc(totalBucketSize);
  T *d_Kth_val;
cudaMalloc(&d_Kth_val, sizeof(T)); thrust::device_ptr<T> dev_ptr(d_vector); // if max == min, then we know that it must not have had the values passed in. if (maxValue == minValue) { thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T>> result = thrust::minmax_element(dev_ptr, dev_ptr + length); minValue = *result.first; maxValue = *result.second; } double slope = (numBuckets - 1) / (maxValue - minValue); // first check is max is equal to min if (maxValue == minValue) { cleanup(h_bucketCount, d_Kth_val, elementToBucket, d_bucketCount); return maxValue; } // make all entries of this vector equal to zero setToAllZero(d_bucketCount, numBuckets); // distribute elements to bucket assignBucket<<<numBlocks, threadsPerBlock, numBuckets * sizeof(uint)>>>( d_vector, length, numBuckets, slope, minValue, elementToBucket, d_bucketCount, offset); // find the bucket containing the kth element we want Kbucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &sum); Kbucket_count = h_bucketCount[Kbucket]; while ((Kbucket_count > 1) && (iter < 1000)) { minValue = max(minValue, minValue + Kbucket / slope); maxValue = min(maxValue, minValue + 1 / slope); K = K - sum + Kbucket_count; if (maxValue - minValue > 0.0f) { slope = (numBuckets - 1) / (maxValue - minValue); setToAllZero(d_bucketCount, numBuckets); reassignBucket<<<numBlocks, threadsPerBlock, numBuckets * sizeof(uint)>>>( d_vector, elementToBucket, d_bucketCount, numBuckets, length, slope, maxValue, minValue, offset, Kbucket); sum = 0; Kbucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &sum); Kbucket_count = h_bucketCount[Kbucket]; iter++; } else { // if the max and min are the same, then we are done cleanup(h_bucketCount, d_Kth_val, elementToBucket, d_bucketCount); return maxValue; } } GetKvalue<<<numBlocks, threadsPerBlock>>>(d_vector, elementToBucket, Kbucket, length, d_Kth_val, offset); cudaMemcpy(&kthValue, d_Kth_val, sizeof(T), cudaMemcpyDeviceToHost); cudaThreadSynchronize(); 
cleanup(h_bucketCount, d_Kth_val, elementToBucket, d_bucketCount); return kthValue; } /* this function finds the kth-largest element from the input array */ template <typename T> T phaseOne(T *d_vector, int length, int K, int blocks, int threads, int pass = 0) { // declaring variables for kernel launches int threadsPerBlock = threads; int numBlocks = blocks; int numBuckets = 1024; int offset = blocks * threads; int kthBucket, kthBucketCount; int newInputLength; int *elementToBucket; // array showing what bucket every element is in // declaring and initializing other variables uint *d_bucketCount, *count; // array showing the number of elements in each bucket uint kthBucketScanner = 0; size_t size = length * sizeof(int); // variable to store the end result T kthValue = 0; T *newInput; // find max and min with thrust double maximum, minimum; thrust::device_ptr<T> dev_ptr(d_vector); thrust::pair<thrust::device_ptr<T>, thrust::device_ptr<T>> result = thrust::minmax_element(dev_ptr, dev_ptr + length); minimum = *result.first; maximum = *result.second; // if the max and the min are the same, then we are done if (maximum == minimum) { return maximum; } // if we want the max or min just return it if (K == 1) { return minimum; } if (K == length) { return maximum; } // Allocate memory to store bucket assignments CUDA_CALL_BS(cudaMalloc(&elementToBucket, size)); // Allocate memory to store bucket counts size_t totalBucketSize = numBuckets * sizeof(uint); CUDA_CALL_BS(cudaMalloc(&d_bucketCount, totalBucketSize)); uint *h_bucketCount = (uint *)malloc(totalBucketSize); // Calculate max-min double range = maximum - minimum; // Calculate the slope, i.e numBuckets/range double slope = (numBuckets - 1) / range; cudaMalloc(&count, sizeof(uint)); // Set the bucket count vector to all zeros setToAllZero(d_bucketCount, numBuckets); // Distribute elements into their respective buckets assignBucket<<<numBlocks, threadsPerBlock, numBuckets * sizeof(uint)>>>( d_vector, length, numBuckets, 
slope, minimum, elementToBucket, d_bucketCount, offset); kthBucket = FindKBucket(d_bucketCount, h_bucketCount, numBuckets, K, &kthBucketScanner); kthBucketCount = h_bucketCount[kthBucket]; printf("naive kthBucketCount = %d\n", kthBucketCount); // we must update K since we have reduced the problem size to elements in the kth bucket if (kthBucket != 0) { K = kthBucketCount - (kthBucketScanner - K); } // copy elements in the kth bucket to a new array cudaMalloc(&newInput, kthBucketCount * sizeof(T)); setToAllZero(count, 1); copyElement<<<numBlocks, threadsPerBlock>>>(d_vector, length, elementToBucket, kthBucket, newInput, count, offset); // store the length of the newly copied elements newInputLength = kthBucketCount; // if we only copied one element, then we are done if (newInputLength == 1) { thrust::device_ptr<T> new_ptr(newInput); kthValue = new_ptr[0]; // free all used memory cudaFree(elementToBucket); cudaFree(d_bucketCount); cudaFree(count); cudaFree(newInput); return kthValue; } /*********************************************************************/ // END OF FIRST PASS, NOW WE PROCEED TO SUBSEQUENT PASSES /*********************************************************************/ // if the new length is greater than the CUTOFF, run the regular phaseOne again if (newInputLength > CUTOFF_POINT && pass < 1) { if (pass > 0) { cudaFree(d_vector); } cudaFree(elementToBucket); cudaFree(d_bucketCount); cudaFree(count); kthValue = phaseOne(newInput, newInputLength, K, blocks, threads, pass + 1); } else { minimum = max(minimum, minimum + kthBucket / slope); maximum = min(maximum, minimum + 1 / slope); kthValue = phaseTwo(newInput, newInputLength, K, blocks, threads, maximum, minimum); } // free all used memory cudaFree(elementToBucket); cudaFree(d_bucketCount); cudaFree(newInput); cudaFree(count); return kthValue; } /**************************************************************************/ /**************************************************************************/ // 
THIS IS THE BUCKETSELECT FUNCTION WRAPPER THAT CHOOSES THE CORRECT VERSION // OF BUCKET SELECT TO RUN BASED ON THE INPUT LENGTH /**************************************************************************/ template <typename T> T bucketSelectWrapper(T *d_vector, int length, int K, int blocks, int threads) { T kthValue; // change K to be the kth smallest K = length - K + 1; if (length <= CUTOFF_POINT) { kthValue = phaseTwo(d_vector, length, K, blocks, threads); return kthValue; } else { kthValue = phaseOne(d_vector, length, K, blocks, threads); return kthValue; } } } // namespace BucketSelect
11,948
#include<iostream>
#include<iomanip>
#include<fstream>
#include<vector>
#include<utility>
#include<chrono>
#include<cstdlib>
#include<cstdio>
#include<cmath>
/*Not sure where the memory allocater resides*/
#include<cuda.h>

// FIX: parenthesized — the original `4.0*atan(1.0)` expanded incorrectly
// inside larger expressions (e.g. x/pi would become x/4.0*atan(1.0)).
#define pi (4.0*atan(1.0))
#define blockDim_x 128
#define blockDim_y 8

using namespace std;
using namespace std::chrono;

/*
__global__ void add(int N, float *x, float *y){
  int index = blockIdx.x*blockDim.x + threadIdx.x;
  int stride = blockDim.x*gridDim.x;
  for(int i=index; i<N; i+=stride){
    y[i] = x[i] + y[i];
  }
}
*/

//monolithic kernel
// One explicit-Euler step of 2-D heat diffusion on an nx*ny grid stored
// row-major with stride nx (element (jx, jy) lives at index jy*nx + jx).
// Boundary cells are never written (Dirichlet BC); one thread per cell.
__global__ void cuda_diffusion2d_0
(
   float    *f,        /* dependent variable */
   float    *fn,       /* dependent variable */
   int      nx,        /* grid number in the x-direction */
   int      ny,        /* grid number in the y-direction */
   float    c0,        /* coefficient no.0 */
   float    c1,        /* coefficient no.1 */
   float    c2         /* coefficient no.2 */
)
{
    int j, jx, jy;
    float fcc, fce, fcw, fcs, fcn;

    jy = blockDim.y*blockIdx.y + threadIdx.y;
    jx = blockDim.x*blockIdx.x + threadIdx.x;

    //Dirichlet BC: interior points only (also guards threads past the grid)
    if(jx > 0 && jx < nx-1){
        if(jy > 0 && jy < ny-1){
            j = nx*jy + jx;

            fcc = f[j];        // centre
            fcw = f[j - 1];    // west
            fce = f[j + 1];    // east
            fcs = f[j - nx];   // south
            fcn = f[j + nx];   // north

            fn[j] = c0*(fce + fcw) + c1*(fcn + fcs) + c2*fcc;
        }
    }
}

int main()
{
    int nx;
    int ny;
    cout<<"Enter nx, ny "<<endl;
    cin>>nx;
    cin>>ny;

    float dx = 1.0/(float)(nx-1);
    float dy = 1.0/(float)(ny-1);
    float dt = 0.01*(dx*dx);

    //allocate arrays and initial condition
    //using unified memory
    float *Told, *Tnew;
    cudaMallocManaged(&Told,(nx*ny)*sizeof(float));
    cudaMallocManaged(&Tnew,(nx*ny)*sizeof(float));

    // Initial condition: T = sin(pi x) sin(pi y).
    // FIX: index as jy*nx + jx to match the kernel's memory layout — the
    // original used i*ny + j, which is a different layout whenever nx != ny.
    for(int i=0; i<nx; i++){
        for(int j=0; j<ny; j++){
            int id = j*nx + i;
            Told[id] = sin((float)i*dx*pi)*sin((float)j*dy*pi);
            Tnew[id] = 0.0f;
        }
    }

    float kappa = 1.0;
    float c0 = kappa*dt/(dx*dx), c1 = kappa*dt/(dy*dy), c2 = 1.0 - 2.0*(c0 + c1);

    // FIX: round the grid up so the whole domain is covered when nx, ny are
    // not multiples of the block dimensions (the kernel's bounds check makes
    // the extra threads harmless). The original truncated (nx/blockDim_x),
    // leaving trailing cells un-updated.
    int gridX = (nx + blockDim_x - 1)/blockDim_x;
    int gridY = (ny + blockDim_y - 1)/blockDim_y;

    //CUDA specific object type
    dim3 grid(gridX,gridY,1), threads(blockDim_x,blockDim_y,1);

    //time loop
    int iter = 0;
    int itermax = 10000;
    int steps =1000;
    double operation = 0.0;

    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    do{ //Updated 2017/7/26
        for(int i = 0; i<steps; ++i){
            //run kernel on gpu
            cuda_diffusion2d_0<<<grid,threads>>>(Told,Tnew,nx,ny,c0,c1,c2);
            swap(Told,Tnew);    // ping-pong the buffers
        }
        cout<<"Step dum: "<<iter+steps<<endl;
        // 7 flops per interior point per step (2 adds + mul, twice, + 2-term fma-ish tail)
        operation += 7.0*(double)ny *(double)nx*(double)steps;
        iter +=steps;
    }while(iter<itermax-steps+1);
    high_resolution_clock::time_point t2 = high_resolution_clock::now();

    duration<double> elapsed_time = duration_cast<duration<double> >(t2-t1);
    cout<<"Operations : "<<operation<<endl;
    cout<<"Elapsed time : "<<elapsed_time.count()<<" secs."<<endl;
    double flops = operation /(elapsed_time.count()*1e9);
    cout<<"Performance : "<<flops<<" GFLOPS"<<endl;

    //synchronize host-gpu memory for file output
    cudaDeviceSynchronize();

    //ouput result to .csv file
    ofstream fileOut;
    fileOut.open("/mnt/RAM_disk/cudaUnifiedMemHeatEq.csv");
    fileOut<<"x,y,z,T\n";
    for(int i=0; i<nx; ++i){
        for(int j=0; j<ny; ++j){
            int id = j*nx + i;   // FIX: same layout as the kernel (see above)
            float xg = (float)i*dx;
            float yg = (float)j*dy;
            fileOut<<setprecision(8);
            fileOut<<fixed;
            fileOut<<xg<<","
                   <<yg<<","
                   <<Told[id]<<","
                   <<Told[id]<<"\n";
        }
    }
    fileOut.close();

    //free memory
    cudaFree(Told);
    cudaFree(Tnew);

    return 0;
}
11,949
#include "includes.h"

// Box (mean) filter with a (2*FILTER_SIZE - 1)^2 window centred on each
// pixel. One thread per output pixel; expects a 2-D launch covering
// width x height. Window samples outside the image are excluded, so border
// pixels average over a smaller neighbourhood (integer-truncated mean).
__global__ void meanFilter(unsigned char *input, unsigned char *output, int height, int width)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;  // column
    int y = blockDim.y * blockIdx.y + threadIdx.y;  // row

    if (y >= height || x >= width)
        return;  // thread falls outside the image

    int sum = 0;      // running sum of in-bounds neighbours
    int samples = 0;  // number of neighbours that were in bounds

    for (int dx = -FILTER_SIZE + 1; dx < FILTER_SIZE; dx++) {
        for (int dy = -FILTER_SIZE + 1; dy < FILTER_SIZE; dy++) {
            int nx = x + dx;
            int ny = y + dy;
            if (nx >= 0 && nx < width && ny >= 0 && ny < height) {
                sum += input[ny * width + nx];
                samples++;
            }
        }
    }

    output[y * width + x] = sum / samples;
}
11,950
// Flatten a 2-D (x, y) coordinate into a row-major index.
__device__ int get(int x, int y, int width){
    return y * width + x;
}

// Wrap a coordinate torus-style: -1 maps to width-1, width maps to 0.
// Only single-step excursions are expected, hence no general modulo.
__device__ int normeValue(int x, int width){
    if(x < 0) //-1
        return width - 1;
    if(x == width)
        return 0;
    return x;
}

// Fills `dir` with the flattened indexes of the 8 toroidal neighbours of
// (i, j), counter-clockwise from east: E, NE, N, NW, W, SW, S, SE.
// FIX: the original declared the array locally and returned its address —
// a dangling pointer into thread-local storage (undefined behaviour).
// The caller now owns the storage and passes it in.
__device__ void neighborsIndexes(int i, int j, int width, int height, int* dir){
    dir[0] = get(normeValue(i+1,width), j, width);
    dir[1] = get(normeValue(i+1,width), normeValue(j+1,height),width);
    dir[2] = get(i, normeValue(j+1,height),width);
    dir[3] = get(normeValue(i-1,width), normeValue(j+1,height),width);
    dir[4] = get(normeValue(i-1,width), j, width);
    dir[5] = get(normeValue(i-1,width), normeValue(j-1,height),width);
    dir[6] = get(i, normeValue(j-1,height),width);
    dir[7] = get(normeValue(i+1,width), normeValue(j-1,height),width);
}

/*
__device__ float getTotalUpdateFromNeighbors(float* tmp, int i, int j, int width, int height){
  int index = get(i,j,width) * 8;
  return tmp[get(normeValue(iPlusOne,width), j, width)] +
         tmp[get(normeValue(iPlusOne,width), normeValue(jPlusOne,height),width)] +
         tmp[get(i, normeValue(jPlusOne,height),width)] +
         tmp[get(normeValue(iMinusOne,width), normeValue(jPlusOne,height),width)] +
         tmp[get(normeValue(iMinusOne,width), j, width)] +
         tmp[get(normeValue(iMinusOne,width), normeValue(jMinusOne,height),width)] +
         tmp[get(i, normeValue(jMinusOne,height),width)] +
         tmp[get(normeValue(iPlusOne,width), normeValue(jMinusOne,height),width)];
}
*/

// For each cell, writes the direction (degrees, 0/45/.../315, CCW from east)
// of its largest-valued neighbour into patchMax. Ties keep the first
// (lowest-angle) neighbour.
extern "C"
__global__ void FIELD_MAX_DIR(int width, int height, float *values, int* patchMax)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < width && j < height ){
        int k = get(i,j,width);
        int maxIndex = 0;
        int neighbors[8];  // caller-owned storage (see neighborsIndexes fix)
        neighborsIndexes(i,j,width,height,neighbors);
        float max = values[neighbors[0]];
        for(int u=1 ; u < 8 ; u++){
            float current = values[neighbors[u]];
            if(max < current){
                max = current;
                maxIndex = u;
            }
        }
        patchMax[k] = maxIndex * 45;
    }
}

//with fields
// Same result as FIELD_MAX_DIR but fully unrolled by hand.
// Tie-breaking differs subtly from the loop version only in that equal
// values keep the earlier direction — identical policy, kept as-is.
extern "C"
__global__ void FIELD_MAX_DIR2(int width, int height, float *values, int* patchMax )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < width && j < height ){
        int k = get(i,j,width);
        float max = values[get(normeValue(i + 1, width), j, width)];
        int maxDir = 0;
        float current = values[get(normeValue(i + 1, width), normeValue(j + 1, height), width)];
        if (current > max) { max = current; maxDir = 45; }
        current = values[get(i, normeValue(j + 1, height), width)];
        if (current > max) { max = current; maxDir = 90; }
        current = values[get(normeValue(i - 1, width), normeValue(j + 1, height), width)];
        if (current > max) { max = current; maxDir = 135; }
        current = values[get(normeValue(i - 1, width), j, width)];
        if (current > max) { max = current; maxDir = 180; }
        current = values[get(normeValue(i - 1, width), normeValue(j - 1, height), width)];
        if (current > max) { max = current; maxDir = 225; }
        current = values[get(i, normeValue(j - 1, height), width)];
        if (current > max) { max = current; maxDir = 270; }
        current = values[get(normeValue(i + 1, width), normeValue(j - 1, height), width)];
        if (current > max) { max = current; maxDir = 315; }
        patchMax[k] = maxDir;
    }
}

//with fields
// Fused pass (unrolled): sums the 8 neighbour contributions from `tmp`,
// tracks the max-direction, then applies evaporation:
//   values[k] = (values[k] + total) * (1 - evapCoef)
extern "C"
__global__ void DIFFUSION_UPDATE_THEN_EVAPORATION_THEN_FIELDMAXDIRV2( int width, int height, float *values, float* tmp, float evapCoef, int* patchMax)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < width && j < height ){
        float max = tmp[get(normeValue(i + 1, width), j, width)];
        float total = max;
        int maxDir = 0;
        float current = tmp[get(normeValue(i + 1, width), normeValue(j + 1, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 45; }
        current = tmp[get(i, normeValue(j + 1, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 90; }
        current = tmp[get(normeValue(i - 1, width), normeValue(j + 1, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 135; }
        current = tmp[get(normeValue(i - 1, width), j, width)];
        total += current;
        if (current > max) { max = current; maxDir = 180; }
        current = tmp[get(normeValue(i - 1, width), normeValue(j - 1, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 225; }
        current = tmp[get(i, normeValue(j - 1, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 270; }
        current = tmp[get(normeValue(i + 1, width), normeValue(j - 1, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 315; }
        int k = get(i,j,width);
        patchMax[k] = maxDir;
        total += values[k];
        values[k] = total - total * evapCoef;
    }
}

//with fields
// Same as V2 but with the wrapped coordinates hoisted into locals.
extern "C"
__global__ void DIFFUSION_UPDATE_THEN_EVAPORATION_THEN_FIELDMAXDIRV3( int width, int height, float *values, float* tmp, float evapCoef, int* patchMax)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < width && j < height ){
        int iPlusOne = i + 1;
        int jPlusOne = j + 1;
        int iMinusOne = i - 1;
        int jMinusOne = j - 1;
        float max = tmp[get(normeValue(iPlusOne, width), j, width)];
        float total = max;
        int maxDir = 0;
        float current = tmp[get(normeValue(iPlusOne, width), normeValue(jPlusOne, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 45; }
        current = tmp[get(i, normeValue(jPlusOne, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 90; }
        current = tmp[get(normeValue(iMinusOne, width), normeValue(jPlusOne, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 135; }
        current = tmp[get(normeValue(iMinusOne, width), j, width)];
        total += current;
        if (current > max) { max = current; maxDir = 180; }
        current = tmp[get(normeValue(iMinusOne, width), normeValue(jMinusOne, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 225; }
        current = tmp[get(i, normeValue(jMinusOne, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 270; }
        current = tmp[get(normeValue(iPlusOne, width), normeValue(jMinusOne, height), width)];
        total += current;
        if (current > max) { max = current; maxDir = 315; }
        int k = get(i,j,width);
        patchMax[k] = maxDir;
        total += values[k];
        values[k] = total - total * evapCoef;
    }
}

// Loop-based version of the fused diffusion/evaporation/max-direction pass.
extern "C"
__global__ void DIFFUSION_UPDATE_THEN_EVAPORATION_THEN_FIELDMAXDIR( int width, int height, float *values, float* tmp, float evapCoef, int* patchMax)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < width && j < height ){//TODO + alone
        int k = get(i,j,width);
        int neighbors[8];  // caller-owned storage (see neighborsIndexes fix)
        neighborsIndexes(i,j,width,height,neighbors);
        int maxIndex = 0;
        float total = tmp[neighbors[0]];
        float max = total;
        for(int u=1 ; u < 8 ; u++){
            float current = tmp[neighbors[u]];
            total += current;
            if(max < current){
                max = current;
                maxIndex = u;
            }
        }
        patchMax[k] = maxIndex * 45;
        total += values[k];
        values[k] = total - total * evapCoef;
    }
}
11,951
//
// Created by auyar on 3.02.2021.
//
#include "cudf_a2a.cuh"

namespace gcylon {

// Subtracts `base` from every element of arr[0:size).
// One thread per element; 1-D launch.
__global__ void rebaseOffsets(int32_t * arr, int size, int32_t base) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        arr[i] -= base;
    }
}

// Ceiling integer division: smallest q such that q * denominator >= numerator.
int ceil(const int& numerator, const int& denominator) {
    return (numerator + denominator - 1) / denominator;
}

//todo: need to take care of the case when the size is more than
// max_thread_count_per_block * max_number_of_blocks
// Launches rebaseOffsets over `arr` and blocks until the kernel finishes.
void callRebaseOffsets(int32_t * arr, int size, int32_t base){
    // FIX: size == 0 previously produced a grid of 0 blocks, which is an
    // invalid launch configuration — nothing to do in that case.
    if (size <= 0) {
        return;
    }
    int threads_per_block = 256;
    int number_of_blocks = ceil(size, threads_per_block);
    rebaseOffsets<<<number_of_blocks, threads_per_block>>>(arr, size, base);
    cudaDeviceSynchronize();
}

}// end of namespace gcylon
11,952
#include "includes.h"

// Fills arr[0:tsize) with the scalar `val` — i.e. arr(:) = val.
// Grid-stride loop, so any 1-D launch configuration covers the whole array.
__global__ void gpu_array_init_r8__(size_t tsize, double *arr, double val)
{
    const size_t first = blockIdx.x*blockDim.x + threadIdx.x;
    const size_t step  = gridDim.x*blockDim.x;

    for (size_t idx = first; idx < tsize; idx += step) {
        arr[idx] = val;
    }
}
11,953
#include "includes.h"

// Nearest-neighbour subsampling of a paired index/label stream.
// Output slot `out` pulls from input slot floor(out * inv_sub_factor),
// where inv_sub_factor is the inverse of the subsampling ratio.
// One thread per output element; 1-D launch covering n_out.
__global__ void subsample_ind_and_labels_GPU(int *d_ind_sub, const int *d_ind, unsigned int *d_label_sub, const unsigned int *d_label, int n_out, float inv_sub_factor)
{
    const unsigned int out = blockIdx.x * blockDim.x + threadIdx.x;
    if (out >= n_out)
        return;

    // Map this output position back onto the (denser) input stream.
    const int in = (int)floorf((float)(out) * inv_sub_factor);

    d_ind_sub[out]   = d_ind[in];
    d_label_sub[out] = d_label[in];
}
11,954
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>

#define THREAD_NUM 256
#define MATRIX_SIZE 1000

// Print a few properties of a CUDA device.
// FIX: totalGlobalMem / sharedMemPerBlock / memPitch are size_t; printing
// them with %d is undefined behaviour on LP64 — use %zu. Also fixed the
// "totalFlbalMem" typo in the label.
void printDeviceProp(const cudaDeviceProp &prop)
{
    printf("Device Name : %s.\n", prop.name);
    printf("totalGlobalMem : %zu.\n", prop.totalGlobalMem);
    printf("sharedMemPerBlock : %zu.\n", prop.sharedMemPerBlock);
    printf("regsPerBlock : %d.\n", prop.regsPerBlock);
    printf("warpSize : %d.\n", prop.warpSize);
    printf("memPitch : %zu.\n", prop.memPitch);
}

// Selects the first device with compute capability >= 1.0 and makes it
// current. Returns false when no usable device exists.
bool InitCUDA()
{
    int count;
    cudaGetDeviceCount(&count);
    if(count == 0){
        fprintf(stderr, "There is no device.\n");
        return false;
    }

    int i;
    for(i = 0; i < count; i++){
        cudaDeviceProp prop;
        // FIX: the original queried the properties twice per device.
        if(cudaGetDeviceProperties(&prop, i) == cudaSuccess && prop.major >= 1){
            break;
        }
    }
    if(i == count){
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(i);
    return true;
}

// Fills an n x n matrix with pseudo-random values in roughly [0, 1).
void matgen(float* a, int n)
{
    for(int i = 0; i < n; i++){
        for(int j = 0; j < n; j++){
            a[i * n + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX * RAND_MAX);
        }
    }
}

// c = a * b for n x n row-major matrices, one thread per output element
// (flat index = bid * THREAD_NUM + tid). The inner loop uses a compensated
// (Kahan-style) summation to reduce float round-off. Thread 0 of each block
// records per-block start/end clock() ticks into time[bid] / time[bid + blocks].
__global__ static void matMultCUDA(const float* a, const float* b, float* c, int n, clock_t* time, const int* blocks_num)
{
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    const int idx = bid * THREAD_NUM + tid;
    const int row = idx / n;
    const int column = idx % n;

    if(tid == 0) time[bid] = clock();

    if(row < n && column < n){
        float t = 0;   // running (compensated) sum
        float y = 0;   // accumulated round-off
        for(int i = 0; i < n; i++){
            float r;
            y -= a[row * n + i] * b[i * n + column];
            r = t - y;
            y = (r - t) + y;   // recover the low-order bits lost in r
            t = r;
        }
        c[row * n + column] = t;
    }

    if(tid == 0){
        time[bid + *blocks_num] = clock();
    }
}

// Runs one GPU multiply with `blocks_num` blocks, validates against a CPU
// reference, and reports max/average relative error plus the span of the
// per-block clock() timestamps.
int func(int blocks_num)
{
    printf("%d\n", blocks_num);
    if(!InitCUDA()){
        return 0;
    }

    float *a, *b, *c, *d;
    int *e;
    int n = MATRIX_SIZE;

    a = (float*)malloc(sizeof(float)* n * n);
    b = (float*)malloc(sizeof(float)* n * n);
    c = (float*)malloc(sizeof(float)* n * n);
    d = (float*)malloc(sizeof(float)* n * n);
    e = (int*)malloc(sizeof(int));
    *e = blocks_num;

    srand(0);
    matgen(a, n);
    matgen(b, n);

    float *cuda_a, *cuda_b, *cuda_c;
    int *cuda_e;
    clock_t* time;
    cudaMalloc((void**)&cuda_a, sizeof(float) * n * n);
    cudaMalloc((void**)&cuda_b, sizeof(float) * n * n);
    cudaMalloc((void**)&cuda_c, sizeof(float) * n * n);
    cudaMalloc((void**)&cuda_e, sizeof(int));
    cudaMalloc((void**)&time, sizeof(clock_t) * blocks_num * 2);

    cudaMemcpy(cuda_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_e, e, sizeof(int), cudaMemcpyHostToDevice);

    matMultCUDA<<<blocks_num, THREAD_NUM, 0>>>(cuda_a, cuda_b, cuda_c, n, time, cuda_e);

    // FIX: heap allocation instead of a variable-length array (non-standard C++).
    clock_t* time_use = (clock_t*)malloc(sizeof(clock_t) * blocks_num * 2);
    cudaMemcpy(c, cuda_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost);
    // FIX: direction was cudaMemcpyHostToDevice, so the timestamps were never
    // copied back and the min/max scan below read uninitialized host memory.
    cudaMemcpy(time_use, time, sizeof(clock_t) * blocks_num * 2, cudaMemcpyDeviceToHost);

    cudaFree(cuda_a);
    cudaFree(cuda_b);
    cudaFree(cuda_c);
    cudaFree(cuda_e);   // FIX: was leaked
    cudaFree(time);

    // GPU time = span from the earliest block start to the latest block end.
    clock_t min_start, max_end;
    min_start = time_use[0];
    max_end = time_use[blocks_num];
    for(int i = 1; i < blocks_num; i++){
        if(min_start > time_use[i]){
            min_start = time_use[i];
        }
        if(max_end < time_use[i + blocks_num]){
            max_end = time_use[i + blocks_num];
        }
    }
    clock_t final_time = max_end - min_start;

    // CPU reference multiply into d (double accumulator).
    for(int i = 0; i < n; i++){
        for(int j = 0; j < n; j++){
            double t = 0;
            for(int k = 0; k < n; k++){
                t += a[i * n + k] * b[k * n + j];
            }
            d[i * n + j] = t;
        }
    }

    // Relative-error statistics of GPU result vs CPU reference.
    float max_err = 0;
    float average_err = 0;
    for(int i = 0; i < n; i++){
        for(int j = 0; j < n; j++){
            if(d[i * n + j] != 0){
                float err = fabs((c[i * n + j] - d[i * n + j]) / d[i * n + j]);
                if(max_err < err) max_err = err;
                average_err += err;
            }
        }
    }
    printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
    // FIX: clock_t is not guaranteed to be int; print through long.
    printf("gputime: %ld\n", (long)final_time);

    // FIX: host buffers were leaked.
    free(time_use);
    free(a);
    free(b);
    free(c);
    free(d);
    free(e);
    return 0;
}

int main()
{
    int blocks_num;
    //blocks_num = MATRIX_SIZE*(MATRIX_SIZE + THREAD_NUM - 1) / THREAD_NUM;
    //func(blocks_num);
    blocks_num = MATRIX_SIZE * MATRIX_SIZE / 1024;
    func(blocks_num);
    //blocks_num = MATRIX_SIZE*(MATRIX_SIZE + THREAD_NUM - 1) / THREAD_NUM;
    //func(blocks_num);
}
11,955
#include "includes.h"
/*
 * Find BLANK and replace your own code.
 * And submit report why do you replace the blank that way.
 */
/* 2015004693_YangSangheon */
#define TILE_WIDTH 24 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;

// Tiled GEMM: output = alpha * (a @ b) + beta * c, all matrices
// input_size x input_size, vectorized row-major.
// Launch with TILE_WIDTH x TILE_WIDTH thread blocks on a grid of
// (input_size/TILE_WIDTH + 1) tiles per dimension.
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size){
    int tx = threadIdx.x, ty = threadIdx.y;
    int bx = blockIdx.x, by = blockIdx.y;
    int row = by*blockDim.y + ty;
    int col = bx*blockDim.x + tx;

    // Blocks wholly beyond the rounded-up tile range do nothing. The bound
    // is a multiple of TILE_WIDTH, so this test is uniform within a block
    // and the early return cannot strand the __syncthreads() below.
    if(row >= (input_size/TILE_WIDTH+1)*TILE_WIDTH || col >= (input_size/TILE_WIDTH+1)*TILE_WIDTH) { return; }

    // 2D tiles of a and b staged in shared memory.
    __shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
    __shared__ float s_b[TILE_WIDTH][TILE_WIDTH];

    float result = 0;

    // Loop over the tiles of the input in phases; the +1 covers the partial
    // tile when input_size is not a multiple of TILE_WIDTH.
    for(int p = 0; p < input_size/TILE_WIDTH+1; p++){
        int aCol = p*TILE_WIDTH + tx;   // column of `a` this thread loads
        int bRow = p*TILE_WIDTH + ty;   // row of `b` this thread loads

        // FIX: the original guarded only the *flat* index against
        // input_size^2, so an out-of-range column/row could wrap into the
        // next row of the matrix and stage a wrong element; the final result
        // stayed correct only because the partner tile entry happened to be
        // zero. Mask each axis explicitly and zero-fill out-of-range slots.
        s_a[ty][tx] = (row  < input_size && aCol < input_size) ? a[row*input_size + aCol] : 0.0f;
        s_b[ty][tx] = (bRow < input_size && col  < input_size) ? b[bRow*input_size + col] : 0.0f;
        __syncthreads();   // tile fully staged before anyone reads it

        for(int i = 0; i < TILE_WIDTH; i++)
            result += s_a[ty][i] * s_b[i][tx];
        __syncthreads();   // done reading before the next phase overwrites
    }

    // Only threads mapping to a real output element write back.
    if(row < input_size && col < input_size)
        output[row*input_size + col] = (alpha * result) + (beta * c[row*input_size + col]);
}
11,956
#include "tsne_p.cuh"

// Minimal driver: invokes tsne_p with no input data (NULL buffers, size-1
// dimensions). Serves as a smoke test that the t-SNE entry point compiles,
// links, and runs.
int main() {
  tsne_p(NULL, 1, NULL, 1);
  return 0;
}
11,957
#include<cuda.h>
#include<stdio.h>

/*
 * Element-wise sum of two N x N integer matrices, computed both on the GPU
 * and on the CPU, timed with CUDA events, and compared for equality.
 */
void initializeArray(int*,int);
void stampaMatriceArray(int*, int, int);
void equalArray(int*, int*, int);
void sommaMatriciCompPerCompCPU(int *, int *, int *, int);
__global__ void sommaMatriciCompPerCompGPU(int*, int*, int*, int);

int main(int argn, char * argv[]){
    dim3 gridDim, blockDim;              // launch geometry
    int N;                               // matrix edge; N*N elements total
    int flag;                            // print flag from argv (currently unused)
    int *A_host, *B_host, *C_host;       // host-side matrices
    int *A_device, *B_device, *C_device; // device-side matrices
    int *copy;                          // host buffer receiving the device result
    int size;                           // bytes per matrix
    int SM = 1536;                      // resident threads per SM on this target
    int threadEffettiviSM = 0;
    int blocResidentiSM = 0;
    int num = 8;

    if(argn<4){
        printf("Numero di parametri insufficiente!!!\n");
        printf("Uso corretto: %s <NumElementi> <NumThreadPerBlocco> <flag per la Stampa>\n",argv[0]);
        printf("Uso dei valori di default\n");
        blockDim.x = blockDim.y = num;
        N=100;
        flag=0;
    }
    else{
        num= atoi(argv[2]);
        N=atoi(argv[1]);
        blockDim.x = blockDim.y = num;
        flag=atoi(argv[3]);
    }
    printf("***\t SOMMA COMPONENTE PER COMPONENTE DI DUE MATRICI \t***\n");

    // Round the grid up so every element is covered when N is not a multiple
    // of the block edge.
    gridDim.x = N / blockDim.x + ((N % blockDim.x) == 0 ? 0:1);
    gridDim.y = N / blockDim.y + ((N % blockDim.y) == 0 ? 0:1);

    size = N*N*sizeof(int);
    blocResidentiSM = SM / (blockDim.x*blockDim.y);

    printf("Taglia della matrice N*N = %d * %d\n", N,N);
    printf("Numero di thread per blocco = %d\n", blockDim.x*blockDim.y);
    printf("Numero di blocchi = %d\n", gridDim.x*gridDim.y);
    printf("Numero di blocchi residenti per SM in totale= %d \n", blocResidentiSM);
    printf("Numero di SM usati in totale= %d \n", blocResidentiSM/8);
    threadEffettiviSM = blockDim.x*blockDim.y*8;
    if(threadEffettiviSM == SM)
        printf("Uso ottimale degli SM \n");
    else
        printf("Usati solo %d thread di %d per ogni SM \n",threadEffettiviSM,SM);

    // Host allocations.
    A_host=(int*)malloc(size);
    B_host=(int*)malloc(size);
    C_host=(int*)malloc(size);
    copy=(int*)malloc(size);

    // Device allocations.
    cudaMalloc((void**)&A_device,size);
    cudaMalloc((void**)&B_device,size);
    cudaMalloc((void**)&C_device,size);

    initializeArray(A_host, N*N);
    initializeArray(B_host, N*N);

    cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);

    // Zero both result buffers before computing.
    memset(C_host, 0, size);
    cudaMemset(C_device, 0, size);

    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    sommaMatriciCompPerCompGPU<<<gridDim, blockDim>>>(A_device, B_device, C_device, N*N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // wait for the stop event before reading the time
    float elapsed;              // elapsed time in milliseconds
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // Blocking copy: also guarantees the kernel has finished.
    cudaMemcpy(copy, C_device, size, cudaMemcpyDeviceToHost);
    printf("tempo GPU=%f\n", elapsed);

    // Same computation on the CPU, timed the same way.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    sommaMatriciCompPerCompCPU(A_host, B_host, C_host, N*N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("tempo CPU=%f\n", elapsed);

    // Correctness check: host result vs device result.
    equalArray(C_host, copy, N*N);

    free(A_host);
    free(B_host);
    free(C_host);
    free(copy);
    cudaFree(A_device);
    cudaFree(B_device);
    cudaFree(C_device);
    exit(0);
}

// Fill `array` with small non-zero values of alternating sign.
void initializeArray(int *array, int n){
    int i;
    for(i=0;i<n;i++){
        // FIX: the original wrote 1/((i+1)*10), which is integer division and
        // always 0 — every matrix was all-zero, making the host/device
        // comparison vacuous. Use small non-zero values, keeping the
        // alternating sign of the original.
        array[i] = (i % 10) + 1;
        if (i % 2 == 0) array[i] = -array[i];
    }
}

// Print a righe x colonne matrix stored row-major in a flat array.
void stampaMatriceArray(int* matrice, int righe, int colonne){
    int i;
    for(i=0;i<righe*colonne;i++){
        printf("%d \t", matrice[i]);
        // FIX: the line break must trigger on the column count; the original
        // tested i % righe (only correct for square matrices).
        if(i % colonne == colonne-1) printf("\n");
    }
    printf("\n");
}

// Report whether arrays a and b hold the same n elements.
void equalArray(int* a, int*b, int n){
    int i=0;
    // FIX: bound the scan. The original `while(a[i]==b[i]) i++;` ran past the
    // end of both arrays whenever all n elements matched (out-of-bounds read,
    // undefined behavior).
    while(i < n && a[i]==b[i])
        i++;
    if(i<n)
        printf("I risultati dell'host e del device sono diversi\n");
    else
        printf("I risultati dell'host e del device coincidono\n");
}

// Serial reference: c[i] = a[i] + b[i].
void sommaMatriciCompPerCompCPU(int *a, int *b, int *c, int n){
    int i;
    for(i=0;i<n;i++)
        c[i]=a[i]+b[i];
}

// Parallel version: one thread per element. The flat index uses the grid's
// own row-major layout; since the sum is purely element-wise, any injective
// mapping that covers [0, n) is correct.
__global__ void sommaMatriciCompPerCompGPU(int *a, int *b, int *c, int n){
    int i, j, index;
    i = blockIdx.x * blockDim.x + threadIdx.x;
    j = blockIdx.y * blockDim.y + threadIdx.y;
    index = j * gridDim.x * blockDim.x + i;
    if(index < n)
        c[index] = a[index]+b[index];
}
11,958
#include "includes.h" __global__ void mean_array_kernel(float *src, int size, float alpha, float *avg) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= size) return; avg[i] = avg[i] * (1 - alpha) + src[i] * alpha; src[i] = avg[i]; }
11,959
//question:
//does contention for accessing global memory affect read performance?
//for instance, n threads access each of the n rows in a column at the same time.
//would it be faster for thread i to access row i % n first and proceed to i + j % n?

#define TILE_WIDTH 32
// FIX: fully parenthesize macro parameters so expression arguments expand correctly.
#define DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
#define GET_INDEX(row, column, numcols) ((row) * (numcols) + (column))

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Row-major integer matrix with a flat element array.
typedef struct{
    int row_count;
    int column_count;
    int* elements;
} Matrix;

__global__ void multiply_kernel_stupid(const Matrix left, const Matrix right, Matrix result);
Matrix ones(int row_count, int column_count);
Matrix random(int row_count, int column_count);
Matrix multiply(Matrix left, Matrix right);
void print_matrix(Matrix mat);

int main(){
    //make the matrices you want to multiply
    srand(time(NULL));
    Matrix A = random(512, 512);
    Matrix B = random(512, 512);
    Matrix result = multiply(A, B);
    print_matrix(A);
    print_matrix(B);
    print_matrix(result);
    // FIX: release the host allocations (they were leaked).
    free(A.elements);
    free(B.elements);
    free(result.elements);
}

// Naive kernel: one thread per output element, global memory only.
__global__ void multiply_kernel_stupid(const Matrix left, const Matrix right, Matrix result){
    int row_index = blockIdx.y * blockDim.y + threadIdx.y;
    int column_index = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: bounds guard for launch grids that overhang the matrix.
    if (row_index >= result.row_count || column_index >= result.column_count) return;
    int sum = 0;
    for(int i = 0; i < left.column_count; i++){
        int left_index = row_index * left.column_count + i;
        int right_index = column_index + i * right.column_count;
        sum += left.elements[left_index] * right.elements[right_index];
    }
    // FIX: the row stride of a row-major matrix is column_count; the original
    // used row_count (only correct for square matrices).
    result.elements[row_index * result.column_count + column_index] = sum;
}

// Tiled kernel: stages TILE_WIDTH x TILE_WIDTH blocks of both operands in
// shared memory. Expects blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void multiply_kernel_smart(const Matrix left, const Matrix right, Matrix result){
    // allocate shared memory
    __shared__ int left_shared[TILE_WIDTH*TILE_WIDTH];
    __shared__ int right_shared[TILE_WIDTH*TILE_WIDTH];

    //get row/col indices
    int result_row_index = threadIdx.y + blockDim.y * blockIdx.y;
    int result_col_index = threadIdx.x + blockDim.x * blockIdx.x;
    int block_row = threadIdx.y;
    int block_col = threadIdx.x;

    //how many tiles do we need to multiply?
    int num_block_mult = DIVIDE_ROUND_UP(left.column_count, blockDim.x);

    // FIX: accumulate across ALL tiles. The original reset the partial sum and
    // stored the result inside the tile loop, so only the last tile's
    // contribution survived in the output.
    int dot_product = 0;

    for(int i = 0; i < num_block_mult; i++){
        //copy relevant tiles to shared memory, zero-padding loads that fall
        //outside the operand matrices
        int left_col_index = i * blockDim.x + block_col;
        int right_row_index = i * blockDim.x + block_row;
        int shared_array_index = GET_INDEX(block_row, block_col, blockDim.x);

        if(left_col_index < left.column_count && result_row_index < left.row_count){
            left_shared[shared_array_index] = left.elements[GET_INDEX(result_row_index, left_col_index, left.column_count)];
        }
        else{
            left_shared[shared_array_index] = 0;
        }
        if(result_col_index < right.column_count && right_row_index < right.row_count){
            right_shared[shared_array_index] = right.elements[GET_INDEX(right_row_index, result_col_index, right.column_count)];
        }
        else{
            right_shared[shared_array_index] = 0;
        }

        //make sure all threads finish copying before multiplying
        __syncthreads();

        // FIX: index the shared tiles with the thread's block-local
        // coordinates; the original used blockIdx.x/blockIdx.y, which made
        // every thread read the same (wrong) tile row/column.
        for(int k = 0; k < blockDim.x; k++){
            dot_product += left_shared[GET_INDEX(block_row, k, blockDim.x)]
                         * right_shared[GET_INDEX(k, block_col, blockDim.x)];
        }

        //make sure all threads finish multiplying before the tiles are refilled
        __syncthreads();
    }

    // FIX: single guarded store after the loop (the original stored once per
    // tile with no bounds check).
    if(result_row_index < result.row_count && result_col_index < result.column_count){
        result.elements[GET_INDEX(result_row_index, result_col_index, result.column_count)] = dot_product;
    }
}

// Host driver: copies the operands to the device, launches the tiled kernel,
// and copies the product back. result has left's rows and right's columns.
Matrix multiply(Matrix left, Matrix right){
    cudaError_t error;

    //step 1: allocate device memory for left and right
    Matrix left_d, right_d;
    left_d.row_count = left.row_count;
    left_d.column_count = left.column_count;
    size_t left_size = left.row_count * left.column_count * sizeof(int);
    error = cudaMalloc((void**) &left_d.elements, left_size);
    if(error != cudaSuccess){
        printf("error allocating left matrix\n");
        printf("CUDA error: %s\n", cudaGetErrorString(error));
    }
    right_d.row_count = right.row_count;
    right_d.column_count = right.column_count;
    size_t right_size = right.row_count * right.column_count * sizeof(int);
    error = cudaMalloc((void**) &right_d.elements, right_size);
    if(error != cudaSuccess){
        printf("error allocating right matrix\n");
    }

    //step 2: allocate memory on the host and device for result
    Matrix result, result_d;
    result.row_count = result_d.row_count = left.row_count;
    result.column_count = result_d.column_count = right.column_count;
    size_t result_size = result.row_count * result.column_count * sizeof(int);
    result.elements = (int*) malloc(result_size);
    error = cudaMalloc((void**) &result_d.elements, result_size);
    if(error != cudaSuccess){
        printf("error allocating matrix\n");
    }

    //step 3: copy left and right to device
    error = cudaMemcpy(left_d.elements, left.elements, left_size, cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("error copying left matrix\n");
    }
    error = cudaMemcpy(right_d.elements, right.elements, right_size, cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("error copying right matrix\n");
    }

    //step 4: launch kernel
    dim3 block_dims(TILE_WIDTH, TILE_WIDTH);
    dim3 grid_dims(DIVIDE_ROUND_UP(result.column_count, block_dims.x),
                   DIVIDE_ROUND_UP(result.row_count, block_dims.y));
    multiply_kernel_smart <<<grid_dims, block_dims>>> (left_d, right_d, result_d);

    //step 5: copy results back to host (blocking copy, so the kernel is done)
    error = cudaMemcpy(result.elements, result_d.elements, result_size, cudaMemcpyDeviceToHost);
    if(error != cudaSuccess){
        printf("error copying result matrix\n");
        printf("CUDA error: %s\n", cudaGetErrorString(error));
    }

    // FIX: release device allocations (they were leaked on every call).
    cudaFree(left_d.elements);
    cudaFree(right_d.elements);
    cudaFree(result_d.elements);

    return result;
}

// row_count x column_count matrix filled with ones.
Matrix ones (int row_count, int column_count){
    Matrix result;
    result.row_count = row_count;
    result.column_count = column_count;
    result.elements = (int*) malloc(row_count * column_count * sizeof(int));
    for(int i = 0; i < row_count * column_count; i++){
        result.elements[i] = 1;
    }
    return result;
}

// row_count x column_count matrix of uniform pseudo-random values in [0, 100).
Matrix random (int row_count, int column_count){
    Matrix result;
    result.row_count = row_count;
    result.column_count = column_count;
    result.elements = (int*) malloc(row_count * column_count * sizeof(int));
    for(int i = 0; i < row_count * column_count; i++){
        result.elements[i] = rand() % 100;
    }
    return result;
}

// Print a matrix row by row.
void print_matrix(Matrix mat){
    int num_elements = mat.row_count * mat.column_count;
    for(int i = 0; i < num_elements; i++){
        printf(" %d", mat.elements[i]);
        if(!((i + 1) % mat.column_count)){
            printf("\n");
        }
    }
}
11,960
#include <stdio.h> #include <iostream> #include <cuda_runtime.h> #include "foo.cuh" int main() { std::cout<<"Hello NVCC"<<std::endl; useCUDA(); return 0; }
11,961
#include "RickerSource.h" #include <iostream> #include <thrust/device_ptr.h> #include "thrust/for_each.h" #include <thrust/iterator/counting_iterator.h> #include "math_constants.h" #include "math.h" #include "RickerSource.h" #define Pi CUDART_PI_F void RickerSource::updateField(d_ptr _field, const int time) { updater.field = _field; thrust::counting_iterator<int> start(time); thrust::counting_iterator<int> end(time+1); thrust::for_each(start , end , updater); } __device__ void RickerUpdater::operator()(const int time) { float temp = powf(Pi*(freq*time-M),2); field[0] = amp*(1-2*temp)*expf(-1.0f*temp); }
11,962
/******************************************************************************
 * FILE: cuda_matrix.cu
 * DESCRIPTION:
 *   A simple cuda program to compute the element-wise square of an N x N
 *   matrix on the GPU.
 * AUTHOR: David Nguyen
 * CONTACT: david@knytes.com
 * LAST REVISED: 20/04/2020
 ******************************************************************************/
#include <cuda.h>
#include <math.h>
#include <stdio.h>

#define BLOCK_SIZE 1024
#define N 32
#define NUM 10

// Element-wise square: p_mat_d[i] = mat_d[i]^2 over all width*width elements.
// Launched 1D; idx covers the flat row-major matrix.
// FIX: the original looped over k with a threadIdx.y-based index, but the
// kernel is launched with a 1D block, so threadIdx.y was always 0: it read
// only row 0 and overwrote every output with the square of that row's last
// element (hidden here because every input element equals NUM).
__global__ void deviceMatrixSquare(unsigned *mat_d, unsigned *p_mat_d, int width){
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned total = (unsigned)width * (unsigned)width;
    if (idx < total){
        unsigned element = mat_d[idx];
        p_mat_d[idx] = element * element;
    }
}

// Host wrapper: allocates device buffers, copies the input, runs the kernel,
// and copies the squared matrix back into p_mat.
__host__ void matrixSquare(unsigned *mat, unsigned *p_mat){
    unsigned size = N*N*sizeof(unsigned);
    unsigned *mat_d, *p_mat_d;

    // Determine number of blocks and blocksize:
    // if N*N <= 1024 then nBlocks = 1 and nThreads = N*N.
    unsigned nBlocks = ceil(((float)N*N)/BLOCK_SIZE);
    unsigned nThreads = N*N <= BLOCK_SIZE ? N*N : BLOCK_SIZE;

    // Memory Allocation
    cudaMalloc(&mat_d, size);
    cudaMalloc(&p_mat_d, size);

    // Load mat_d to device memory
    cudaMemcpy(mat_d, mat, size, cudaMemcpyHostToDevice);

    // Device Function - Matrix Squaring
    deviceMatrixSquare<<<nBlocks, nThreads>>>(mat_d, p_mat_d, N);

    // Copy result from device memory (blocking, so the kernel has finished)
    cudaMemcpy(p_mat, p_mat_d, size, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(mat_d);
    cudaFree(p_mat_d);
}

int main(){
    // Host matrices: input and squared output.
    unsigned *mat, *p_mat;

    mat = (unsigned*)malloc(N*N*sizeof(unsigned));
    p_mat = (unsigned*)malloc(N*N*sizeof(unsigned));

    // Set all matrix values to NUM
    for(int i = 0; i < N*N; i++){
        mat[i] = NUM;
    }

    // Print array after num-setting to check values
    printf("Matrix\n");
    for(int i = 0; i < N*N; i++){
        printf(" %d", mat[i]);
        // Newline if end of row
        if((i+1)%N==0){
            printf("\n");
        }
    }
    printf("\n");

    // Call matrix squaring function
    matrixSquare(mat, p_mat);

    // Print array after squaring to check values
    printf("AFTER MATRIX SQUARING\n");
    for(int i = 0; i < N*N; i++){
        printf(" %d", p_mat[i]);
        // Newline if end of row
        if((i+1)%N==0){
            printf("\n");
        }
    }
    printf("\n");

    // Free host memory. FIX: p_mat was leaked (only mat was freed).
    free(mat);
    free(p_mat);
    return 0;
}
11,963
#include "includes.h" __global__ void daxpy_kernel(int n, double a, double * x, double * y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i]; } }
11,964
#include <stdio.h>
#include <cuda.h>
//#include <cuda_runtime.h>
//#include <curand_kernel.h>
#include <string.h>
#include <time.h>
#include <limits.h> // FIX: INT_MAX was used below without this include

enum comptuationDevice{
    dev_both,
    dev_gpu,
    dev_cpu
};

/* Global startup settings, overridden from the command line in main(). */
struct Startup{
    comptuationDevice device = dev_gpu;
    int randomMod = 2;                 // modulus for random matrix entries
    int seedValue = time(nullptr);     // RNG seed
    int maxDimension = INT_MAX;        // largest dimension to benchmark
    int startDimension = 2;            // first dimension to benchmark
    int threadsPerBlock = 256;
    int onlyMatrixSize = 0;            // FIX: was initialized with NULL; this is an int, 0 means "not set"
    const char* outputDirectory = "."; // FIX: const char* — holds string literals
    bool matSave = false;
    bool matPrint = false;
} startup;

/* Square matrix stored row-major in a flat array. */
struct squareMatrix{
    int* elements;
    int dimension;
};

const char* help = "\n Shows what parameters are available\n\
\t--help\n\n\
Selects which device should be used:\n\
\t--device cpu\n\
\t--device gpu\n\
\t--device both\n\n\
sets seedvalue for random number generation (default: currentTime)\n\
\t--seed [int]\n\n\
sets mod value for random number generation (default: 2)\n\
\t--random_mod [int]\n\n\
sets max dimension to compute (default: max matrix that can fit in vram)\n\
\t--max_dimension [int]\n\n\
sets starting matrix dimension (default: 2)\n\
\t--start_dimension [int]\n\n\
only computes a single matrix of n size.\n\
\t--only [int]\n\n\
sets the number of threads per block (default: 256). Should be a multiple of cuda cores\n\
\t--block_threads [int 1-1024]\n\n\
outputs matrix a, b and result. (not recommended for extremely large matrices)\n\
\t--mat_print (prints to the console)\n\
\t--mat_save [filepath] (saves to disk. filepath optional)\n\n";

// dimension x dimension matrix of rand() % startup.randomMod values.
__host__ squareMatrix createRandomSquareMatrix(int dimension){
    int mat_elements = dimension * dimension;
    int* mat = (int*)malloc(sizeof(int)*mat_elements);
    for (int i = 0; i < mat_elements; i++)
        mat[i] = rand()%startup.randomMod;
    return {mat, dimension};
}

// Print a matrix row by row to stdout.
__host__ void printSquareMatrix(squareMatrix mat){
    for (int i = 0; i < mat.dimension*mat.dimension; i++){
        if (i % mat.dimension == 0 && i != 0) printf("\n");
        printf("%d ", mat.elements[i]);
    }
    printf("\n\n");
}

// Serial reference multiply: result = a x b (all square, same dimension).
__host__ void multiplyMatrices(squareMatrix a, squareMatrix b, squareMatrix result){
    if (a.dimension != b.dimension) exit(1);
    for (int i = 0; i < result.dimension; i++){
        for (int j = 0; j < result.dimension; j++){
            result.elements[j + result.dimension*i] = 0;
            for (int k = 0; k < result.dimension; k++)
                result.elements[j + result.dimension*i] +=
                    a.elements[k + result.dimension*i] * b.elements[j + result.dimension*k];
        }
    }
}

// One thread per output element; each thread walks a row of A and a column of B.
__global__ void multiplyMatricesParallel(int* mat_a, int* mat_b, int* mat_results, int dimension){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < dimension*dimension){
        mat_results[idx] = 0;
        for (int i = 0; i < dimension; i++){
            int row = idx / dimension;
            mat_results[idx] += mat_a[i + dimension*row] * mat_b[idx%dimension + dimension*i];
        }
    }
}

// Banner printed before each benchmark run.
__host__ void printMatrixInfo(int dimension, const char* type){
    printf("--------------------------------------------------------------------------------------------\nMultiplying %dx%d X %dx%d using the %s ...\n\n",
           dimension, dimension, dimension, dimension, type);
}

// Write a matrix to "<outputDirectory>/<d>x<d>_X_<d>x<d>_<label>.txt".
__host__ void saveMatrixToFile(squareMatrix mat_sav, const char* label){
    char fileNameBuffer[256];
    char dim[30];
    snprintf(dim, 10,"%d", mat_sav.dimension);
    snprintf(fileNameBuffer, sizeof fileNameBuffer, "%s/%sx%s_X_%sx%s_%s.txt",
             startup.outputDirectory, dim, dim, dim, dim, label);
    FILE* fp = fopen( fileNameBuffer, "w");
    if (fp == nullptr) {
        printf("Could not log to file\n");
        // FIX: the original fell through to fclose(fp) with fp == NULL,
        // which is undefined behavior.
        return;
    }
    for (int i = 0; i < mat_sav.dimension*mat_sav.dimension; i++){
        if (i % mat_sav.dimension == 0 && i != 0) fprintf(fp, "\n");
        fprintf(fp, "%d ", mat_sav.elements[i]);
    }
    fprintf(fp, "\n");
    fclose(fp);
}

// Print a clock_t delta in milliseconds.
__host__ void printTime(clock_t totaltime){
    int msec = totaltime * 1000 / CLOCKS_PER_SEC;
    printf("Done in %d msec!\n", msec);
}

/*
Found on the stack overflow:
https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
Throws errors if cuda command doesn't return Success
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Benchmark the serial CPU multiply, with per-phase timing.
__host__ void testHostPreformance(squareMatrix mat_a, squareMatrix mat_b){
    printMatrixInfo(mat_a.dimension, "CPU");
    clock_t initalTime = clock();

    printf("\tAllocating Result Matrix To Ram... ");
    clock_t before = clock();
    squareMatrix mat_results = {(int*)malloc(sizeof(int)*mat_a.dimension*mat_a.dimension), mat_a.dimension};
    printTime(clock() - before);

    before = clock();
    printf("\tPreforming Multiplication... ");
    multiplyMatrices(mat_a, mat_b, mat_results);
    printTime(clock() - before);

    if (startup.matPrint) printSquareMatrix(mat_results);
    if (startup.matSave) {
        printf("\tSaving Result Matrix to Disk... ");
        before = clock();
        saveMatrixToFile(mat_results, "matrix_result");
        printTime(clock() - before);
    }

    printf("\tDeallocating Result Matrix From Ram... ");
    before = clock();
    free(mat_results.elements);
    printTime(clock() - before);

    printf("\nTotal Time: ");
    printTime(clock() - initalTime);
}

// Benchmark the GPU multiply: allocate, copy, launch, copy back — timed per phase.
__host__ void testDevicePreformance(squareMatrix mat_a, squareMatrix mat_b){
    printMatrixInfo(mat_a.dimension, "GPU");
    clock_t initalTime = clock();
    if (mat_a.dimension != mat_b.dimension) exit(1);
    int allocationsize = mat_a.dimension * mat_b.dimension * sizeof(int);

    printf("\tAllocating Result Matrix To RAM... ");
    clock_t before = clock();
    squareMatrix mat_results = {(int*)malloc(allocationsize), mat_a.dimension};
    printTime(clock() - before);

    int* dev_mat_a, *dev_mat_b, *dev_mat_results;
    printf("\tAllocating A, B, and RESULT Matrix To VRAM... ");
    before = clock();
    gpuErrchk(cudaMalloc((void **)&dev_mat_a, allocationsize));
    gpuErrchk(cudaMalloc((void **)&dev_mat_b, allocationsize));
    gpuErrchk(cudaMalloc((void **)&dev_mat_results, allocationsize));
    printTime(clock() - before);

    printf("\tCopying A, B To VRAM... ");
    before = clock();
    gpuErrchk(cudaMemcpy(dev_mat_a, mat_a.elements, allocationsize, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dev_mat_b, mat_b.elements, allocationsize, cudaMemcpyHostToDevice));
    printTime(clock() - before);

    printf("\tComputing and Copying Result... ");
    before = clock();
    int totalThreadsNeeded = mat_a.dimension*mat_a.dimension;
    // One extra block covers the remainder when the total is not a multiple
    // of threadsPerBlock; the kernel bounds-checks.
    multiplyMatricesParallel<<<totalThreadsNeeded / startup.threadsPerBlock + 1, startup.threadsPerBlock>>>
        (dev_mat_a, dev_mat_b, dev_mat_results, mat_a.dimension);
    gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaMemcpy(mat_results.elements, dev_mat_results, allocationsize, cudaMemcpyDeviceToHost));
    printTime(clock() - before);

    if (startup.matPrint) printSquareMatrix(mat_results);
    if (startup.matSave) {
        printf("\tSaving Result Matrix to Disk... ");
        before = clock();
        saveMatrixToFile(mat_results, "matrix_result");
        printTime(clock() - before);
    }

    printf("\tDeallocating Result Matrix... ");
    before = clock();
    cudaFree(dev_mat_a);
    cudaFree(dev_mat_b);
    cudaFree(dev_mat_results);
    free(mat_results.elements);
    printTime(clock() - before);

    printf("\nTotal Time: ");
    printTime(clock() - initalTime);
}

// Generate two random matrices of the given dimension and run the selected
// device benchmarks on them.
__host__ void testMatrixMultiplicationPreformance(int dimension){
    srand(startup.seedValue);
    squareMatrix mat_a, mat_b;
    mat_a = createRandomSquareMatrix(dimension);
    mat_b = createRandomSquareMatrix(dimension);
    if (startup.matPrint) { printSquareMatrix(mat_a); printSquareMatrix(mat_b); }
    if (startup.matSave)  { saveMatrixToFile(mat_a, "matrix_A"); saveMatrixToFile(mat_b, "matrix_B"); }
    if (startup.device != dev_cpu) testDevicePreformance(mat_a, mat_b);
    if (startup.device != dev_gpu) testHostPreformance(mat_a, mat_b);
    free(mat_a.elements);
    free(mat_b.elements);
}

// Free VRAM in bytes. NOTE(review): truncates to unsigned int, which wraps on
// cards with more than 4 GiB free — kept for compatibility with existing callers.
unsigned int getFreeGpuMem() {
    size_t free_t;
    cudaMemGetInfo(&free_t,nullptr);
    return (unsigned int)free_t;
}

// Largest square dimension such that three int matrices (A, B, result) fit in
// the currently free VRAM, minus one for headroom.
__host__ unsigned int calculateLargestPossibleMatrixDimension(){
    unsigned int free = getFreeGpuMem();
    unsigned int memoryPerMatrix = free / (sizeof(int) * 3);
    unsigned int maxMatrixDimension = sqrt( memoryPerMatrix ) - 1;
    return maxMatrixDimension;
}

int main(int argc, char** argv) {
    // Parse command-line flags into the global startup settings.
    for (int i = 0; i < argc; i++){
        if (strcmp(argv[i], "--help")==0) { printf("%s", help); exit(-1); }
        if (strcmp(argv[i], "--device")==0 && i+1 < argc) {
            if (strcmp(argv[i+1], "gpu") == 0) startup.device = dev_gpu;
            else if (strcmp(argv[i+1], "cpu") == 0) startup.device = dev_cpu;
            else if (strcmp(argv[i+1], "both") == 0) startup.device = dev_both;
        }
        if (strcmp(argv[i], "--random_mod")==0 && i+1 < argc) startup.randomMod = atoi(argv[i+1]);
        if (strcmp(argv[i], "--max_dimension")==0 && i+1 < argc) startup.maxDimension = atoi(argv[i+1]);
        if (strcmp(argv[i], "--seed")==0 && i+1 < argc) startup.seedValue = atoi(argv[i+1]);
        if (strcmp(argv[i], "--start_dimension")==0 && i+1 < argc) startup.startDimension = atoi(argv[i+1]);
        if (strcmp(argv[i], "--only")==0 && i+1 < argc) startup.onlyMatrixSize = atoi(argv[i+1]);
        if (strcmp(argv[i], "--block_threads")==0 && i+1 < argc) startup.threadsPerBlock = atoi(argv[i+1]);
        if (strcmp(argv[i], "--mat_print")==0) startup.matPrint = true;
        if (strcmp(argv[i], "--mat_save")==0){
            startup.matSave = true;
            // Optional value: only treat the next token as a path if it is
            // not another flag.
            if (i+1 < argc && strstr(argv[i+1], "--") == NULL)
                startup.outputDirectory = argv[i+1];
        }
    }

    /*Tests only one matrix if parameter passed in*/
    if (startup.onlyMatrixSize != 0)
        testMatrixMultiplicationPreformance(startup.onlyMatrixSize);
    /*Otherwise, double matrix size until vram is completely filled*/
    else {
        unsigned int maxMatrixDimension = calculateLargestPossibleMatrixDimension();
        // FIX: use an unsigned loop variable to avoid signed/unsigned
        // comparison against the dimension limits.
        for (unsigned int i = (unsigned int)startup.startDimension;
             i != maxMatrixDimension*2 && i <= (unsigned int)startup.maxDimension; i*=2 )
        {
            maxMatrixDimension = calculateLargestPossibleMatrixDimension();
            if (i > maxMatrixDimension) i = maxMatrixDimension;
            testMatrixMultiplicationPreformance(i);
        }
    }
    return 0;
}
11,965
#include<stdio.h>
#include<stdlib.h> // FIX: malloc/free were used without this include
#include<string.h> // FIX: memset was used without this include
#include"scrImagePgmPpmPackage.h"

#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32

// Per-block 256-bin histogram of an 8-bit image read through a texture object.
// Each BLOCK_SIZE_X x BLOCK_SIZE_Y block builds its own histogram in shared
// memory and writes it to imageHistogram[blockId*256 .. blockId*256+255].
// NOTE(review): the launch grid below truncates width/height, so images whose
// dimensions are not multiples of the block size lose their edge remainder.
__global__ void calculateHistogram(unsigned int *imageHistogram, unsigned int width, unsigned int height, cudaTextureObject_t texObj)
{
    const unsigned int tidX = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int tidY = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int localId = threadIdx.y*blockDim.x+threadIdx.x;
    // Base of this block's slice of the global histogram array.
    const unsigned int histStartIndex = (blockIdx.y*gridDim.x+blockIdx.x) * 256;

    __shared__ unsigned int histo_private[256];

    // The first 256 threads of the block zero the shared histogram.
    if(localId <256)
        histo_private[localId] = 0;
    __syncthreads();

    // Step 4: read this thread's pixel through the texture object and tally it.
    unsigned char imageData = tex2D<unsigned char>(texObj,(float)(tidX),(float)(tidY));
    atomicAdd(&(histo_private[imageData]), 1);
    __syncthreads();

    // Publish the block's histogram to global memory.
    if(localId < 256)
        imageHistogram[histStartIndex+localId] = histo_private[localId];
}

int main(int argc, char*argv[])
{
    int height=0, width =0, noOfHistogram=0;
    unsigned char*data;                            // host image pixels
    unsigned int *imageHistogram, *d_imageHistogram; // host/device histograms
    char inputStr[1024] = {"aerosmith-double.pgm"};
    cudaError_t returnValue;

    // Channel description for an 8-bit single-channel texture.
    cudaArray* cu_array;
    cudaChannelFormatKind kind = cudaChannelFormatKindUnsigned;
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, kind);

    get_PgmPpmParams(inputStr, &height, &width); //getting height and width of the current image
    data = (unsigned char*)malloc(height*width*sizeof(unsigned char));
    printf("\n Reading image width height and width [%d][%d]", height, width);
    scr_read_pgm( inputStr , data, height, width );//loading an image to "inputimage"

    // One 256-bin histogram per image block (8-bit pixels: values 0-255).
    noOfHistogram = (height/BLOCK_SIZE_Y) * (width/BLOCK_SIZE_X) * 256;
    imageHistogram = (unsigned int*)malloc(noOfHistogram*sizeof(unsigned int));

    //Allocate CUDA Array
    returnValue = cudaMallocArray( &cu_array, &channelDesc, width, height);
    // FIX: a cudaArray cannot be filled with plain cudaMemcpy (cu_array is an
    // opaque handle, not a linear device pointer). Use the dedicated 2D
    // array copy; source pitch = width bytes for a tightly packed 8-bit image.
    returnValue = (cudaError_t)(returnValue |
        cudaMemcpy2DToArray(cu_array, 0, 0, data,
                            width * sizeof(unsigned char),
                            width * sizeof(unsigned char), height,
                            cudaMemcpyHostToDevice));
    if(returnValue != cudaSuccess)
        printf("\n Got error while running CUDA API Array Copy");

    // Step 1. Specify texture resource
    struct cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = cu_array;

    // Step 2. Specify texture object parameters
    struct cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    // Step 3. Create the texture object
    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
    if(returnValue != cudaSuccess)
        printf("\n Got error while running CUDA API Bind Texture");

    cudaMalloc(&d_imageHistogram, noOfHistogram*sizeof(unsigned int) );

    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y,1);
    dim3 dimGrid(width/dimBlock.x,height/dimBlock.y,1);
    printf("\n Launching grid with blocks [%d][%d] ", dimGrid.x,dimGrid.y);

    calculateHistogram<<<dimGrid, dimBlock>>>(d_imageHistogram,width,height, texObj);
    returnValue = (cudaError_t)(returnValue | cudaDeviceSynchronize());
    returnValue = (cudaError_t)(returnValue | cudaMemcpy(imageHistogram, d_imageHistogram,
                        noOfHistogram*sizeof(unsigned int), cudaMemcpyDeviceToHost));
    if(returnValue != cudaSuccess)
        printf("\n Got error while running CUDA API kernel");

    // Step 5: Destroy texture object
    cudaDestroyTextureObject(texObj);

    printf("\n Histogram perr section is as follows: ");
    for(int i=0;i< noOfHistogram/256;i++)
    {
        printf("\n----------------------------- Histograam for block %d----------------------- \n", i);
        for(int j=0;j<256;j++)
        {
            int index = i*256 + j;
            printf( "[%d=[%d]] ", j, imageHistogram[index]);
        }
    }

    if(data != NULL) free(data);
    if(cu_array !=NULL) cudaFreeArray(cu_array);
    if(imageHistogram != NULL) free(imageHistogram);
    if(d_imageHistogram!=NULL) cudaFree(d_imageHistogram);
    return 0;
}
11,966
/* Loop DOALL -> loop em que todos termos do vetor sao usados e nao ha dependencia entre iteracoes -> PARALELIZAVEL * Loop DOACROSS-> uma iteração depende de uma outra iteração -> nao paralelizavel. * Regra do 80/20 -> 80% da execução do programa ocorre em 20% das instrucoes do codigo (se for do all é paralelizavel). * Se esse loop for do all e executar a mesma intrução não ha necessidade de cada core ter a sua I-cache -> ela e grande e pode ser eliminada -> espaço para mais cores -> GPU * CUDA -> linguagem para programação em GPU que permite computação heterogenia (CPU + GPU) * Terminologia: Host -> CPU e sua memoria; Device -> GPU e sua memoria; * Fluxo do programa: CPU transfere dados para a memoria da GPU (esse processo é muito custoso e por isos as vezes não compensa utilizar a GPU); GPU executa o kernel em paralelo; GPU transefere novamente o dado para CPU (ou DRAM). * A GPU é organizada em SMX, cada SMX tem um conjunto de blocos e cada bloco tem um conjunto de threads. o work load pe despachado da SMX para os blocos em wraps q sao conjuntos de threads (normalmente 32). * A GPU tem uma memória local acessada por todas SMX e uma memória local compartilhada (shared memory) que é mais rápida que a global, porém restringida ao uso apenas dentro de uma SMX. * * * Criando um kernel: * keyword -> __global__ (significa q a função sera chamada pelo host e executada no device). * __global__ void mykernel (void) { } * * * Invocando um Kernel: * mykernel<<<N,M>>> () -> N é numero de blocos e M numero de threads por bloco; * * * Precisamos alocar memoria na GPU para executar um kernel: * Ponteiros da GPU apontam para memoria na GPU e pontiero da CPU para memória da CPU. 
* cudaMalloc((void **) &variavel_cuda, tamanho_variavel); * cudaFree(variavel_cuda); * cudaMemcpy(variavel_cuda, variavel_CPU, tamanho, cudaMemcpyHostToDevice); * cudaMemcpy (variavel_CPU, variavel_cuda, tamanho, cudaMemcpyDeviceToHost); * * * Um kernel é executado uma grid de blocos, que é 3D, e temos algumas palavras reservadas: * threadIdx.x/y/z * blockIdx.x/y/z * blockDim.x/y/z -> variavel N setada na chamada do kernel na main. * gridDim -> N*M; * Podemos combinar varios blocos e varias threads, e o calculo de indice complica um pouco (int index = threadIdx.x + blockIdx.x * blockDim.x); * Vale a pena usar threads pois elas podem sincronizar e se comunicar! * * * Como acessar a memoria compartilhada do bloco: * palavra reservada __shared__ (usa dentro do kernel para variaveis criadas dentro do kernel. Exemplo: __shared__ int variavel). * o dado não é visível para threads em outros blocos. * * * __syncthreads(): usado para previnir hazard the read/write after write/read: * todas threads de um BLOCO devem chegar ate aquele ponto para progredir. * * * Coordenação: * as chamadas de kernels sao feitas de forma asincrona! * A CPU precisa sincronizar antes de ler os resultados da computação no device: * cudaMemcpy(); -> bloqueia a CPU e so retoma quando termina de copiar (e logo termina a computação da GPU) * cudaMemcpyAsync(); -> asincrono, nao bloqueia a GPU * cudaDevideSynchronize(); ->bloqueia a CPU ate que todos processos da GPU terminem. * * * Device Management: * cudaGetDeviceCount(int *count) * cudaSetDevice(int device) * cudaGetDevice(int *device) * cudaGetDeviceProperties(cudaDeviceProp *prop, int device) * * * Memoria: * uma linha inteira da DRAM é lida para o row buffer (memory core speed -> lento). * dessa linha um MUX é usado para selecionar qual word voce realmente quer (interface speed -> rapido) * DDRx -> memory core speed = 1/(2^x) interface speed. 
* para compensar essa lentidao uma linha da ram tem 2^x de bits em largura, e todos esses bits sao uma linha que vao para o row buffer, que seleciona a word desejada (isso é chamado de burst -> trazer varias palavras para o row buffer). * Também, aliado a isso pode-se ter varios bancos, um colados aos outros de DRAM, que operam em paralelo e aumentam o bandwidth (largura de banda -> bits/s transferidos). * * * Memory Coalescing: * O acesso a memoria é dito coalesced se todos endereços do burst foram usados e uncoalesced se nao usou todos endereços. * um acesso não é coalesced se uma das palavras reservadas (threadIdx, BlockIdx, etc) for multiplicada por uma constante. * * * Control divergence: * se um mesmo wrap executa um bloco com if's entao ha perda de eficiencia pois cada thread pode executar o bloco if ou o bloco else, introduzindo "nop's" ao que ele nao executar, aumentando assim o tempo de execução do programa. * * * Problema da Corida: * CUDA soluciona o problema da corrida com funções que fazem operações atomicas, como: * atomicAdd (int* adress, int value); * * * Observaçoes: * em questado de threadIdx.x/y/z * eixo horizontal eh o x (colunas) * eixo vertical eh o y (linhas) * profundidade eh o eixo z * * Tiling -> dividir o dado em blocos que cabe dentro de um bloco e utiliza-lo na shared memory * * Coalescing e Shared Memory são o segredo para se programar bem em CUDA */ #include <stdio.h> #include <stdlib.h> __global__ void add_vector (int * device_A, int * device_B, int * device_C) { int index = threadIdx.x + blockIdx.x * blockDim.x; device_C[index] = device_A[index] + device_B[index]; } int main () { int tamanho_dos_vetores, i; int *host_A, *host_B, *host_C; int *device_A, *device_B, *device_C; printf("Insira o tamanho dos vetores (multiplo de 4 e pelo menos 4): \n"); scanf("%d", &tamanho_dos_vetores); host_A = (int *) malloc (tamanho_dos_vetores * sizeof(int)); host_B = (int *) malloc (tamanho_dos_vetores * sizeof(int)); host_C = (int *) malloc 
(tamanho_dos_vetores * sizeof(int)); cudaMalloc ((void ** ) &device_A, tamanho_dos_vetores * sizeof(int)); cudaMalloc ((void ** ) &device_B, tamanho_dos_vetores * sizeof(int)); cudaMalloc ((void ** ) &device_C, tamanho_dos_vetores * sizeof(int)); printf("Insira os %d numeros do 1o vetor: \n", tamanho_dos_vetores); for (i = 0; i < tamanho_dos_vetores; i++) { scanf("%d", &host_A[i]); } printf("Insira os %d numeros do 2o vetor: \n", tamanho_dos_vetores); for (i = 0; i < tamanho_dos_vetores; i++) { scanf("%d", &host_B[i]); } cudaMemcpy (device_A, host_A, tamanho_dos_vetores * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy (device_B, host_B, tamanho_dos_vetores * sizeof(int), cudaMemcpyHostToDevice); add_vector<<<4, tamanho_dos_vetores/4>>>(device_A, device_B, device_C); cudaMemcpy (host_C, device_C, tamanho_dos_vetores * sizeof(int), cudaMemcpyDeviceToHost); printf("Resultado da Soma: \n"); printf("%d", host_C[0]); for (i = 1; i < tamanho_dos_vetores; i++) { printf(" %d", host_C[i]); } printf("\n"); free(host_A); free(host_B); free(host_C); cudaFree(device_A); cudaFree(device_B); cudaFree(device_C); return 0; }
11,967
#include "includes.h"

// Make a row-major n x n matrix symmetric in place by mirroring across the
// diagonal: every element with r > c is overwritten with its transposed
// counterpart input[r*n + c].  `len` is the total element count (n * n).
//
// FIX: the original strided by a hard-coded 65535 instead of the launched
// thread count, so whenever fewer than 65535 threads were launched, the
// index residues beyond the thread count were silently never processed.
// A proper grid-stride loop is correct for any launch configuration.
__global__ void THCudaTensor_copyUpperSymmetric(float *input, int n, int len)
{
    const int stride = gridDim.x * blockDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += stride) {
        const int r = idx % n;
        const int c = idx / n;
        if (r > c) {
            // source element lies strictly on the other side of the diagonal
            // and is never written by any thread, so there is no race
            input[idx] = input[r*n + c];
        }
    }
}
11,968
/*
 * Noopur Maheshwari : 111464061
 * Rahul Rane : 111465246
 */
#include <iostream>
#include <map>
#include <queue>
#include <functional>
//#include "scheduler.h"
// NOTE(review): pthread_t, pthread_mutex_t and cudaStream_t are used below
// but <pthread.h> / the CUDA runtime header are not included here —
// presumably they come in via the commented-out "scheduler.h"; confirm.
using namespace std;

// Per-worker task queues and their locks, keyed by the worker's pthread id.
map<pthread_t, deque<function<void()>>> cpu_que_map;
map<pthread_t, pthread_mutex_t> cpu_lock_map;
map<pthread_t, deque<function<void()>>> gpu_que_map;
map<pthread_t, pthread_mutex_t> gpu_lock_map;
// CUDA stream associated with each GPU worker (declared but not used in
// this file).
map<pthread_t, cudaStream_t> stream_map;
// Global shutdown flag, defined elsewhere in the project.
extern bool completed;

// Pick a pseudo-random CPU worker (steal victim) by indexing the key set of
// cpu_que_map with a Marsaglia xorshift generator.
// NOTE(review): the static generator state is mutated without any
// synchronization, so concurrent callers race on it; tolerable here only
// because the value merely spreads out steal attempts.
pthread_t get_random_cpu() {
    static unsigned long x=123456789, y=362436069, z=521288629;
    unsigned long t;
    x ^= x << 16;
    x ^= x >> 5;
    x ^= x << 1;
    t = x;
    x = y;
    y = z;
    z = t ^ x ^ y;
    auto itr = cpu_que_map.begin();
    // map iteration order is stable, so this picks the (z mod count)-th worker
    return next(itr, z%cpu_que_map.size())->first;
}

// Same as get_random_cpu(), but over the GPU worker map.
pthread_t get_random_gpu() {
    static unsigned long x=123456789, y=362436069, z=521288629;
    unsigned long t;
    x ^= x << 16;
    x ^= x >> 5;
    x ^= x << 1;
    t = x;
    x = y;
    y = z;
    z = t ^ x ^ y;
    auto itr = gpu_que_map.begin();
    return next(itr, z%gpu_que_map.size())->first;
}

// Worker loop for a CPU thread: run tasks from its own queue front (FIFO);
// when the own queue is empty, steal one task from the *back* of a randomly
// chosen worker's queue (classic work-stealing; the victim may be this
// thread itself).  Exits once `completed` is set and the inspected queue is
// empty.
void* __do_work_cpu(void *data) {
    pthread_t tid, tid_stolen;
    while(!completed) {
        tid = pthread_self();
        pthread_mutex_lock(&(cpu_lock_map[tid]));
        // shutdown requested and nothing left locally -> stop
        if (completed && cpu_que_map[tid].size() == 0) {
            pthread_mutex_unlock(&cpu_lock_map[tid]);
            break;
        }
        if (!cpu_que_map[tid].empty()) {
            // NOTE(review): the task executes while this worker's queue lock
            // is held, so producers enqueuing to this worker block for the
            // task's whole duration.
            cpu_que_map[tid].front()();
            cpu_que_map[tid].pop_front();
            pthread_mutex_unlock(&cpu_lock_map[tid]);
            continue;
        } else {
            pthread_mutex_unlock(&cpu_lock_map[tid]);
        }
        //Steal from other queue
        tid_stolen = get_random_cpu();
        pthread_mutex_lock(&cpu_lock_map[tid_stolen]);
        if (completed && cpu_que_map[tid_stolen].size() == 0) {
            pthread_mutex_unlock(&cpu_lock_map[tid_stolen]);
            break;
        }
        if (!cpu_que_map[tid_stolen].empty()) {
            // steal from the back end, leaving the victim's front untouched
            cpu_que_map[tid_stolen].back()();
            cpu_que_map[tid_stolen].pop_back();
            pthread_mutex_unlock(&cpu_lock_map[tid_stolen]);
            continue;
        } else {
            pthread_mutex_unlock(&cpu_lock_map[tid_stolen]);
        }
    }
    // NOTE(review): if `completed` is already true on entry, the loop body
    // never runs and `tid` is printed uninitialized here.
    cout<<"CPU:"<<tid<<" exited"<<endl;
    pthread_exit(NULL);
}

// GPU-side worker loop; identical structure to __do_work_cpu(), but over the
// gpu_* queue/lock maps.  The same review notes apply.
void* __do_work_gpu(void *data) {
    pthread_t tid, tid_stolen;
    while(!completed) {
        tid = pthread_self();
        pthread_mutex_lock(&(gpu_lock_map[tid]));
        if (completed && gpu_que_map[tid].size() == 0) {
            pthread_mutex_unlock(&gpu_lock_map[tid]);
            break;
        }
        if (!gpu_que_map[tid].empty()) {
            gpu_que_map[tid].front()();
            gpu_que_map[tid].pop_front();
            pthread_mutex_unlock(&gpu_lock_map[tid]);
            continue;
        } else {
            pthread_mutex_unlock(&gpu_lock_map[tid]);
        }
        //Steal from other queue
        tid_stolen = get_random_gpu();
        pthread_mutex_lock(&gpu_lock_map[tid_stolen]);
        if (completed && gpu_que_map[tid_stolen].size() == 0) {
            pthread_mutex_unlock(&gpu_lock_map[tid_stolen]);
            break;
        }
        if (!gpu_que_map[tid_stolen].empty()) {
            gpu_que_map[tid_stolen].back()();
            gpu_que_map[tid_stolen].pop_back();
            pthread_mutex_unlock(&gpu_lock_map[tid_stolen]);
            continue;
        } else {
            pthread_mutex_unlock(&gpu_lock_map[tid_stolen]);
        }
    }
    cout<<"GPU:"<<tid<<" exited"<<endl;
    pthread_exit(NULL);
}
11,969
// g++ -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_OMP -I../../../thrust/ -fopenmp -x c++ exemplo3.cu -o exemplo3 && ./exemplo3 < ../17-intro-gpu/stocks.txt
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <thrust/count.h>

// Predicate: strictly positive value.
struct is_positivo
{
    __host__ __device__ bool operator()(const double &x)
    {
        return x > 0;
    }
};

// Predicate: strictly negative value.
struct is_negative
{
    __host__ __device__ bool operator()(const double &x)
    {
        return x < 0;
    }
};

// Reads a sequence of stock prices from stdin, then reports (1) how many
// days the price rose and (2) the average rise over those days.
int main()
{
    thrust::host_vector<double> vec_cpu;
    double price;
    // FIX: test the stream state *after* each extraction.  The original
    // looped on `while (!cin.fail())` and pushed before noticing the failed
    // read, so it always stored (and counted) one extra stale element at EOF.
    while (std::cin >> price)
    {
        vec_cpu.push_back(price);
    }
    int N = static_cast<int>(vec_cpu.size());
    if (N < 2)
    {
        // need at least two prices to form one daily variation
        return 0;
    }

    thrust::device_vector<double> stocks(vec_cpu);
    thrust::device_vector<double> ganho_diario(N - 1);
    // daily gain: ganho_diario[i] = stocks[i+1] - stocks[i]
    thrust::transform(stocks.begin() + 1, stocks.end(), stocks.begin(), ganho_diario.begin(), thrust::minus<double>());

    // how many times did the price go up?
    int result = thrust::count_if(ganho_diario.begin(), ganho_diario.end(), is_positivo());
    std::cout << result << "\n";

    // average increase, counting only the days the price actually rose:
    // zero out the negative gains, sum, divide by the count of positives.
    // (If result == 0 this prints inf/nan, matching the original behaviour.)
    thrust::replace_if(ganho_diario.begin(), ganho_diario.end(), ganho_diario.begin(), is_negative(), 0);
    double positiveSoma = thrust::reduce(ganho_diario.begin(), ganho_diario.end(), 0.0, thrust::plus<double>());
    double aumento_medio = positiveSoma / result;
    std::cout << aumento_medio << "\n";
}
11,970
//16CO212 16CO249
//Computer Architecture Lab Assignment 0
//Question 2
//The generated array is 0, 2, 4, ..., 2*(N-1).  (The original header claimed
//squares 1^2..N^2, but the code fills A[i] = 2*i.)  N = 500 and is changeable.
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define BLOCK_SIZE 1024
#define N 500

// In-place tree reduction of A (length n); thread 0 of each block publishes
// the block's partial sum into *ans with one atomicAdd.
//
// FIX: the original indexed A[i] and A[i + s] with no bounds check, so with
// n = 500 and 1024 threads per block it read far past the end of the
// 500-element allocation.  The `i + s < n` / `i < n` guards keep every
// access in range while producing the same sum (missing elements simply
// contribute nothing at a given step and are folded in at a smaller stride).
__global__ void AddArray(float *A, float* ans, int n)
{
    unsigned int tid = threadIdx.x;
    unsigned int i = blockDim.x * blockIdx.x + tid;
    for(unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if(tid < s && i + s < n)
        {
            A[i] += A[i + s];
        }
        __syncthreads(); // every thread must reach this; only the add is guarded
    }
    if(tid == 0 && i < n) //Returns the sum of the array elements
    {
        atomicAdd(ans, A[i]);
    }
}

int main()
{
    float* A;
    float* ans;
    float* d_A;
    float* d_ans;

    A = (float *) malloc(N * sizeof(float));
    int i;
    for(i=0; i<N; i++)
    {
        A[i] = (float)(i*2);
    }

    // ceil-div block count; use BLOCK_SIZE consistently instead of a 1024 literal
    int blocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;

    ans = (float*) malloc(sizeof(float));
    *ans = 0;

    //Allocating device_memory
    cudaMalloc((void **)&d_A, N * sizeof(float));
    cudaMalloc((void **)&d_ans, sizeof(float));

    //Copying the memory from host to device
    cudaMemcpy(d_A, A, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ans, ans, sizeof(float), cudaMemcpyHostToDevice);

    //invoke kernel (N is passed so the kernel can bounds-check)
    AddArray<<<blocks, BLOCK_SIZE>>>(d_A, d_ans, N);

    cudaMemcpy(ans, d_ans, sizeof(float), cudaMemcpyDeviceToHost);
    printf("Sum of generated array= %f\n", *ans);

    //Freeing the memory
    cudaFree(d_A);
    cudaFree(d_ans);
    free(A);
    free(ans);
    return 0;
}
11,971
#include <stdio.h>
#include <string.h>   /* memset */
#include <sys/time.h>

// Wall-clock time in seconds (microsecond resolution), used for timing.
double cpuSecond(){
    struct timeval tp;
    gettimeofday(&tp,NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}

// CPU reference: h_C = h_A + h_B over an nx-by-ny row-major matrix.
void sumMatrixOnHost(float *h_A, float *h_B, float *h_C, const int nx, const int ny){
    for(int i = 0; i < ny; i++){
        for(int j = 0; j < nx; j++){
            int idx = j + i*nx;
            h_C[idx] = h_A[idx] + h_B[idx];
        }
    }
}

// Fill a buffer with 0, 1, 2, ... so results are deterministic.
void initMatrix(float *ip, const int size){
    for(int i =0; i < size; i++)
        ip[i] = i;
}

// 2D launch variant: one thread per element (kept for comparison runs).
__global__ void sumMatrixOnDevice2D(float *d_A, float *d_B, float *d_C, const int nx, const int ny){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y*blockDim.y + threadIdx.y;
    int idx = ix + iy*nx;
    if(idx < ny*nx)
        d_C[idx] = d_A[idx] + d_B[idx];
}

// 1D launch variant: one thread per *column*, looping down the rows.
// Must be launched with a 1D grid covering nx threads.
__global__ void sumMatrixOnDevice1D(float *d_A, float *d_B, float *d_C, const int nx, const int ny){
    unsigned int ix = threadIdx.x + blockDim.x*blockIdx.x;
    if(ix < nx){
        for(int i = 0; i < ny; i++) {
            int idx = ix + i*nx;
            d_C[idx] = d_A[idx] + d_B[idx];
        }
    }
}

// Compare the CPU reference against the GPU result element-wise.
void checkRef(float *h_Ref, float *d_Ref, const int nx, const int ny){
    double epsilon = 1.0e-8;
    bool match = true;
    for(int i = 0; i < ny; i++){
        for(int j = 0; j < nx; j++){
            int idx = j + i*nx;
            if(abs(d_Ref[idx] - h_Ref[idx]) > epsilon){
                match = false;
                printf("cpu and gpu not match,\n %d, %5.3f, %5.3f\n",idx,d_Ref[idx],h_Ref[idx]);
                break;
            }
        }
    }
    if(match){
        printf("Result Match !\n");
    }
}

int main(){
    // NOTE: assumes a second GPU (device 1) exists; use 0 on single-GPU boxes.
    int dev = 1;
    cudaSetDevice(dev);

    // set size of data: 16384 x 16384 floats
    int nx = 1<<14;
    int ny = 1<<14;
    int nxy = nx*ny;
    int nBytes = sizeof(float)*nxy;

    float *h_A, *h_B, *h_Ref, *g_Ref;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_Ref = (float *)malloc(nBytes);
    g_Ref = (float *)malloc(nBytes);

    initMatrix(h_A,nxy);
    initMatrix(h_B,nxy);
    memset(h_Ref,0,nBytes);
    memset(g_Ref,0,nBytes);

    double iStart, iEp;
    iStart = cpuSecond();
    // FIX: the original left this call commented out, so h_Ref stayed
    // all-zero and checkRef() reported a bogus mismatch for every row.
    sumMatrixOnHost(h_A,h_B,h_Ref,nx, ny);
    iEp = cpuSecond() - iStart;
    printf(" sumMatrixOnHost time [%.3f]sec\n",iEp);

    // device buffers
    float *d_A, *d_B, *d_Ref;
    cudaMalloc((void **)&d_A,nBytes);
    cudaMalloc((void **)&d_B,nBytes);
    cudaMalloc((void **)&d_Ref,nBytes);
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);

    int dimx = 128, dimy = 1;
    dim3 block(dimx,dimy);
    // FIX: the 1D kernel needs only a 1D grid over the columns.  The
    // original launched ((nx/dimx) x ny) blocks with this kernel, so every
    // row of blocks recomputed the entire matrix — same answer, ~ny times
    // the work.
    dim3 grid((nx + dimx - 1)/dimx, 1);

    iStart = cpuSecond();
    sumMatrixOnDevice1D<<<grid,block>>>(d_A,d_B,d_Ref,nx,ny);
    cudaDeviceSynchronize();
    iEp = cpuSecond() - iStart;
    // FIX: label said "sumMatrixOnDevice2D" although the 1D kernel runs here
    printf("sumMatrixOnDevice1D time <<<(%d,%d),(%d,%d)>>> [%.3f]sec\n", grid.x,grid.y,block.x,block.y,iEp);

    // cp data from device to host
    cudaMemcpy(g_Ref,d_Ref,nBytes,cudaMemcpyDeviceToHost);
    // FIX: pass (cpu reference, gpu result) to match checkRef's parameters
    checkRef(h_Ref,g_Ref,nx,ny);

    free(h_A);
    free(h_B);
    free(h_Ref);
    free(g_Ref);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Ref);
    cudaDeviceReset();
    return 0;
}
11,972
// This program selects the CUDA device matching a specified set of properties.
// CUDA support
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Plain C++ support
#include <iostream>
// NOTE(review): memset() is used below but <cstring> is not included;
// presumably it arrives transitively through the CUDA headers — confirm.
using namespace std;

// Entry point; takes no command-line arguments yet.
int main() {
    // Describe the device properties we require.
    cudaDeviceProp devicePropDefined;
    memset(&devicePropDefined, 0, sizeof(cudaDeviceProp)); // zero = "don't care"
    // Required compute capability: 5.2
    devicePropDefined.major = 5;
    devicePropDefined.minor = 2;

    int devicedChoosed; // id of the selected device
    cudaError_t cudaError;

    cudaGetDevice(&devicedChoosed); // query the currently active device id
    cout << "当前使用设备的编号: " << devicedChoosed << endl;

    // Find the device that best matches the requested properties.
    cudaChooseDevice(&devicedChoosed, &devicePropDefined);
    cout << "满足指定属性要求的设备的编号: " << devicedChoosed << endl;

    // Make the chosen device current for subsequent CUDA calls.
    cudaError = cudaSetDevice(devicedChoosed);
    if (cudaError == cudaSuccess)
        cout << "设备选取成功!" << endl;
    else
        cout << "设备选取失败!" << endl;

    // Wait for a keypress before exiting (keeps the console window open).
    char c;
    cin>>c;
    return 0;
}
11,973
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/version.h>
#include <iostream>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <cmath>

// square<T> computes the square of a number f(x) -> x*x
template <typename T>
struct square
{
    __host__ __device__ T operator()(const T& x) const
    {
        return x * x;
    }
};

// Compute the Euclidean (L2) norm of {1,2,3,4} on the device:
// sqrt(sum of squares) via a single thrust::transform_reduce, then print it.
int main(void)
{
    // initialize host array
    float x[4] = { 1.0, 2.0, 3.0, 4.0 };
    // transfer to device
    thrust::device_vector<float> d_x(x, x + 4);
    // setup arguments
    square<float> unary_op;        // per-element transform: x -> x*x
    thrust::plus<float> binary_op; // reduction operator
    float init = 0;                // reduction identity
    // compute norm
    float norm = std::sqrt(thrust::transform_reduce(d_x.begin(), d_x.end(), unary_op, init, binary_op));
    std::cout << norm << std::endl;
    return 0;
}

// Disabled device-query main(), kept for reference.
// NOTE(review): several printf labels below are mis-encoded (mojibake), and
// `deviceProp.minor = 9999` is an assignment where a comparison was likely
// intended — left exactly as found since the code is commented out.
//int main() {
//	int deviceCount;
//	cudaGetDeviceCount(&deviceCount);
//
//	int dev;
//	for (dev = 0; dev < deviceCount; dev++)
//	{
//		int driver_version(0), runtime_version(0);
//		cudaDeviceProp deviceProp;
//		cudaGetDeviceProperties(&deviceProp, dev);
//		if (dev == 0)
//			if (deviceProp.minor = 9999 && deviceProp.major == 9999)
//				printf("\n");
//		printf("\nDevice%d:\"%s\"\n", dev, deviceProp.name);
//		cudaDriverGetVersion(&driver_version);
//		printf("CUDA汾: %d.%d\n", driver_version / 1000, (driver_version % 1000) / 10);
//		cudaRuntimeGetVersion(&runtime_version);
//		printf("CUDAʱ汾: %d.%d\n", runtime_version / 1000, (runtime_version % 1000) / 10);
//		printf("豸: %d.%d\n", deviceProp.major, deviceProp.minor);
//		printf("Total amount of Global Memory: %u bytes\n", deviceProp.totalGlobalMem);
//		printf("Number of SMs: %d\n", deviceProp.multiProcessorCount);
//		printf("Total amount of Constant Memory: %u bytes\n", deviceProp.totalConstMem);
//		printf("Total amount of Shared Memory per block: %u bytes\n", deviceProp.sharedMemPerBlock);
//		printf("Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
//		printf("Warp size: %d\n", deviceProp.warpSize);
//		printf("Maximum number of threads per SM: %d\n", deviceProp.maxThreadsPerMultiProcessor);
//		printf("Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
//		printf("Maximum size of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0],
//			deviceProp.maxThreadsDim[1],
//			deviceProp.maxThreadsDim[2]);
//		printf("Maximum size of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
//		printf("Maximum memory pitch: %u bytes\n", deviceProp.memPitch);
//		printf("Texture alignmemt: %u bytes\n", deviceProp.texturePitchAlignment);
//		printf("Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f);
//		printf("Memory Clock rate: %.0f MHz\n", deviceProp.memoryClockRate * 1e-3f);
//		printf("Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth);
//	}
//
//	int major = THRUST_MAJOR_VERSION;
//	int minor = THRUST_MINOR_VERSION;
//	std::cout << "Thrust v" << major << "." << minor << std::endl;
//
//	return 0;
//}
11,974
/*
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "book.h"
#include "cusparse.h"
*/

// Sparse matrix-vector product y = A*x for a matrix in CSR format, "scalar"
// variant: one thread per row, grid-stride over the N rows.
//   d_val    - nonzero values
//   d_vector - dense input vector x
//   d_cols   - column index of each nonzero
//   d_ptr    - row start offsets (length N+1; row i spans [d_ptr[i], d_ptr[i+1]))
//   N        - number of rows
//   d_out    - output vector y (length N)
template <typename T>
__global__ void spmv_csr_scalar_kernel(T * d_val, T * d_vector, int * d_cols, int * d_ptr, int N, T * d_out)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // grid-stride loop: each thread handles rows tid, tid+totalThreads, ...
    for (int i = tid; i < N; i += blockDim.x * gridDim.x)
    {
        T t = 0;
        int start = d_ptr[i];   // first nonzero of row i
        int end = d_ptr[i + 1]; // one past the last nonzero of row i
        // One thread handles all elements of the row assigned to it
        for (int j = start; j < end; j++)
        {
            int col = d_cols[j];
            t += d_val[j] * d_vector[col];
        }
        d_out[i] = t;
    }
}
11,975
#include <iostream>
using namespace std;

const int N = 16;
const int CORES = 16;

/* GPU kernel: each block is responsible for exactly one character of the
   string (blockIdx.x selects it).  Lowercase ASCII letters are converted to
   uppercase by subtracting 32, the distance between the two cases. */
__global__ void hello(char* s)
{
    char ch = s[blockIdx.x];
    if (ch >= 'a' && ch <= 'z')
    {
        s[blockIdx.x] = ch - 32;
    }
}

/* Host side: ship a lowercase string to the GPU, capitalize it with one
   single-thread block per character, copy it back, and print it. */
int main( )
{
    /* the string data - 'hello world', in lower-case */
    char cpu_string[N] = "hello world!";

    /* device buffer for the string */
    char* gpu_string = NULL;
    cudaMalloc((void**) &gpu_string, N * sizeof(char));

    /* host -> device */
    cudaMemcpy(gpu_string, cpu_string, N * sizeof(char), cudaMemcpyHostToDevice);

    /* CORES blocks of one thread each */
    hello<<<CORES, 1>>>(gpu_string);

    /* device -> host, then release the device buffer */
    cudaMemcpy(cpu_string, gpu_string, N * sizeof(char), cudaMemcpyDeviceToHost);
    cudaFree(gpu_string);

    /* show the capitalized result */
    cout << cpu_string << endl;
    return 0;
}
11,976
#pragma once
// #include "fixnum/word_fixnum.cu"

// One element of the cubic extension field: a0 + a1*X + a2*X^2, with each
// coefficient held in the modular backend's `modnum` representation.
template < typename fixnum>
class cubic_ext_element {
public:
    typedef fixnum modnum;
    modnum a0;  // constant coefficient
    modnum a1;  // X coefficient
    modnum a2;  // X^2 coefficient
    __device__ cubic_ext_element() { }
};

// Arithmetic in the cubic extension F_p[X] / (X^3 - alpha), delegating all
// per-coefficient work to the `monty` modular-arithmetic backend `mod`.
// This class only does the polynomial bookkeeping.
template < typename fixnum, typename monty >
class cubic_ext {
public:
    typedef fixnum modnum;
    monty mod;     // base-field modular arithmetic
    modnum alpha;  // non-residue defining the extension, kept in modnum form
    typedef cubic_ext_element<fixnum> ext_element;

    // Construct the backend for `modulus` and convert alpha into the
    // backend's internal representation once, up front.
    __device__ cubic_ext(fixnum modulus, fixnum _alpha) : mod(modulus), alpha(_alpha) {
        modnum t;
        mod.to_modnum(t, alpha);
        alpha = t;
    }

    // Convert all three coefficients of z into internal form, in place.
    __device__ void to_modnum(ext_element &z) {
        modnum t0, t1, t2;
        mod.to_modnum(t0, z.a0);
        mod.to_modnum(t1, z.a1);
        mod.to_modnum(t2, z.a2);
        z.a0 = t0;
        z.a1 = t1;
        z.a2 = t2;
    }

    // Convert all three coefficients of z back out of internal form, in place.
    __device__ void from_modnum(ext_element &z) {
        fixnum t0, t1, t2;
        mod.from_modnum(t0, z.a0);
        mod.from_modnum(t1, z.a1);
        mod.from_modnum(t2, z.a2);
        z.a0 = t0;
        z.a1 = t1;
        z.a2 = t2;
    }

    // Coefficient-wise addition: z = x + y.
    __device__ void add(ext_element &z, ext_element &x, ext_element &y) {
        modnum t0, t1, t2;
        mod.add(t0, x.a0, y.a0);
        mod.add(t1, x.a1, y.a1);
        mod.add(t2, x.a2, y.a2);
        z.a0 = t0;
        z.a1 = t1;
        z.a2 = t2;
    }

    // Schoolbook multiplication of x and y followed by reduction modulo
    // (X^3 - alpha), i.e. X^3 ≡ alpha and X^4 ≡ alpha*X.
    // x and y are taken by value so z may alias either operand.
    __device__ void mul(ext_element &z, ext_element x, ext_element y) {
        modnum t0, t1, t2;
        modnum a0b0, a0b1, a0b2;
        modnum a1b0, a1b1, a1b2;
        modnum a2b0, a2b1, a2b2;
        // all nine pairwise coefficient products
        mod.mul(a0b0, x.a0, y.a0);
        mod.mul(a0b1, x.a0, y.a1);
        mod.mul(a0b2, x.a0, y.a2);
        mod.mul(a1b0, x.a1, y.a0);
        mod.mul(a1b1, x.a1, y.a1);
        mod.mul(a1b2, x.a1, y.a2);
        mod.mul(a2b0, x.a2, y.a0);
        mod.mul(a2b1, x.a2, y.a1);
        mod.mul(a2b2, x.a2, y.a2);
        // c0 = a0*b0 + alpha*(a1*b2 + a2*b1)
        mod.add(t0, a1b2, a2b1);
        mod.mul(t1, alpha, t0);
        mod.add(z.a0, a0b0, t1);
        // c1 = a0*b1 + a1*b0 + alpha*a2*b2
        mod.mul(t0, alpha, a2b2);
        mod.add(t1, t0, a0b1);
        mod.add(z.a1, t1, a1b0);
        // c2 = a0*b2 + a1*b1 + a2*b0
        mod.add(t0, a0b2, a1b1);
        mod.add(z.a2, t0, a2b0);
    }
};
11,977
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>

// CUDA kernel for vector addition: one element per thread, bounds-guarded.
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
    // Calculate global thread ID (tid)
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Vector boundary guard
    if (tid < n) {
        // Each thread adds a single element
        c[tid] = a[tid] + b[tid];
    }
}

// Initialize vector of size n to ints between 0-99.
void vector_init(int* a, int n) {
    for (int i = 0; i < n; i++) {
        a[i] = rand() % 100;
    }
}

// Check the vector-add result against a scalar recomputation.
void check_answer(int* a, int* b, int* c, int n) {
    for (int i = 0; i < n; i++) {
        assert(c[i] == a[i] + b[i]);
    }
}

int main() {
    // Current device ordinal, used as the destination of the prefetches.
    // FIX: the original wrote `int id = cudaGetDevice(&id);`, which first
    // stores the device ordinal in `id` and then overwrites it with the
    // cudaError_t return value — only accidentally correct on device 0.
    int id = 0;
    cudaGetDevice(&id);

    // Vector size of 2^16 (65536 elements)
    int n = 1 << 16;

    // unified (managed) memory pointers
    int* a, * b, * c;

    // Allocation size for all vectors
    size_t bytes = sizeof(int) * n;

    // Allocate managed memory, accessible from host and device
    cudaMallocManaged(&a, bytes);
    cudaMallocManaged(&b, bytes);
    cudaMallocManaged(&c, bytes);

    // Initialize vectors a and b with random values between 0 and 99
    vector_init(a, n);
    vector_init(b, n);

    // Threadblock size
    int BLOCKS = 256;

    // FIX: ceil-div in integer arithmetic.  The original `(int)ceil(n / BLOCKS)`
    // did integer division first, so ceil() saw an already-truncated value and
    // the last partial block was dropped whenever n was not a multiple.
    int GRID = (n + BLOCKS - 1) / BLOCKS;

    // Prefetch a and b to the device so the data is resident before the launch
    cudaMemPrefetchAsync(a, bytes, id);
    cudaMemPrefetchAsync(b, bytes, id);

    // Launch kernel on the default stream
    vectorAdd <<<GRID, BLOCKS >>> (a, b, c, n);

    // Wait for the kernel before touching the results on the host
    cudaDeviceSynchronize();

    // Prefetch c back toward the CPU for the verification pass
    cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);

    // Check result for errors
    check_answer(a, b, c, n);

    // FIX: release the managed allocations (the original leaked them)
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);

    printf("COMPLETED SUCCESFULLY\n");
    return 0;
}
11,978
#include "includes.h"

// Element-wise "<=" between two rows of a packed float buffer of row length N:
// writes xf[idxf-1][i] <= xf[idxf-2][i] into row idxb of the bool buffer xb.
// Grid-stride loop, so any launch configuration covers all N elements.
__global__ void LEQ(float * xf, bool * xb, size_t idxf, size_t idxb, size_t N)
{
    int step = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < N) {
        const float lhs = xf[(idxf - 1) * N + i];
        const float rhs = xf[(idxf - 2) * N + i];
        xb[idxb * N + i] = (lhs <= rhs);
        i += step;
    }
}
11,979
#include <cuda.h>
#include <stdio.h>
#include <time.h>

#define N 10

// Structure-of-arrays storage for N 3-D points; SoA keeps each axis
// contiguous, so adjacent threads read adjacent floats.
typedef struct {
    float x[N];
    float y[N];
    float z[N];
} coord;

// One thread per adjacent pair of points:
// result[i] = squared Euclidean distance between point i and point i+1.
// Launched with N-1 threads in a single block, so index+1 stays in bounds.
__global__ void compute_dist2(float *x, float *y, float *z, float *result)
{
    int index = threadIdx.x;
    float deltaX = x[index+1] - x[index];
    float deltaY = y[index+1] - y[index];
    float deltaZ = z[index+1] - z[index];
    result[index] = deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ;
}

// Fill the coordinate arrays with pseudo-random values (seeded from the clock).
void loadRandomCoords(coord *c)
{
    int i;
    srand(time(NULL));
    for (i = 0; i<N; i++) {
        c->x[i] = rand();
        c->y[i] = rand();
        c->z[i] = rand();
    }
}

// Deterministic fixture data; currently unused (kept for debugging).
void loadTestCoords(coord *c)
{
    int i;
    for (i = 0; i<N; i++) {
        c->x[i] = i+1;
        c->y[i] = i+2;
        c->z[i] = i+3;
    }
}

// Print `len` floats separated by spaces, followed by a newline.
void printArray(float *f, int len)
{
    int i;
    for (i = 0; i<len; i++) {
        printf("%f ", f[i]);
    }
    printf("\n");
}

int main()
{
    coord c;
    float result[N-1];
    float *dev_x, *dev_y, *dev_z, *dev_result;
    // single block of N-1 threads: one per adjacent pair
    dim3 grid(1,1), block(N-1,1);
    //Load coordinates into host arrays.
    loadRandomCoords(&c);
    //Allocate memory for device pointers.
    cudaMalloc(&dev_x, N*sizeof(float));
    cudaMalloc(&dev_y, N*sizeof(float));
    cudaMalloc(&dev_z, N*sizeof(float));
    cudaMalloc(&dev_result, (N-1)*sizeof(float));
    //Transfer coordinates from host to device.
    cudaMemcpy(dev_x, c.x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y, c.y, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_z, c.z, N*sizeof(float), cudaMemcpyHostToDevice);
    //Call kernel to compute euclidean distance b/w adjacent points squared.
    //Be sure to only use device pointers since device can't access host mem
    //and vice versa.
    compute_dist2 <<<grid,block>>>(dev_x, dev_y, dev_z, dev_result);
    //Transfer results from device memory to host memory.
    //(blocking cudaMemcpy also synchronizes with the kernel above)
    cudaMemcpy(result, dev_result, (N-1)*sizeof(float), cudaMemcpyDeviceToHost);
    //Free device memory.
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_z);
    cudaFree(dev_result);
    printArray(result, N-1);
    return 0;
}
11,980
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define MASK_WIDTH 5
#define BLOCK_SIZE 8
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255

// One RGB pixel, 8 bits per channel.
typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

// A PPM image: dimensions (x = width, y = height) and its pixel buffer.
typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;

// Wall-clock time in seconds (microsecond resolution), used for timing.
double rtclock() {
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday (&Tp, &Tzp);
    if (stat != 0) printf("Error return from gettimeofday: %d",stat);
    return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}

// Load a binary ("P6") PPM file, validating the header, skipping '#'
// comments, and requiring 8-bit components.  Exits the process on any error.
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    // magic number must be "P6" (binary RGB)
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // skip '#' comment lines between the magic number and the dimensions
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n') ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    // consume the single whitespace after the maxval, then read raw pixels
    while (fgetc(fp) != '\n') ;
    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    // NOTE(review): this re-checks `img` instead of `img->data`, so a failed
    // pixel-buffer allocation is not actually caught here.
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

// Write `img` to stdout as a binary P6 PPM (header, comment line, pixels).
// Note this closes stdout, so it must be the program's last output.
void writePPM(PPMImage *img) {
    fprintf(stdout, "P6\n");
    fprintf(stdout, "# %s\n", COMMENT);
    fprintf(stdout, "%d %d\n", img->x, img->y);
    fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
    fwrite(img->data, 3 * img->x, img->y, stdout);
    fclose(stdout);
}

// Box-blur (mean filter) of MASK_WIDTH x MASK_WIDTH over the image, tiled
// through shared memory: each block stages its BLOCK_SIZE x BLOCK_SIZE tile
// plus an n-pixel halo on every side, then averages from the tile.
// NOTE(review): there is no `x < size_x` / `y < size_y` guard, so when the
// image dimensions are not multiples of BLOCK_SIZE the edge blocks read and
// write past the image buffers — confirm input sizes or add guards.
__global__ void smoothing_kernel(PPMPixel *image, PPMPixel *image_copy, int size_x, int size_y) {
    __shared__ PPMPixel image_ds[BLOCK_SIZE + MASK_WIDTH - 1][BLOCK_SIZE + MASK_WIDTH - 1];
    int total_red;
    int total_blue;
    int total_green;
    int n = (MASK_WIDTH - 1)/2;  // halo radius
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    // NOTES(ciroceissler): load the interior elements of the tile
    image_ds[n + threadIdx.x][n + threadIdx.y] = image_copy[(y * size_x) + x] ;
    // global coordinates of this thread's position shifted one block in each
    // direction; used to fetch the halo pixels below
    int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
    int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
    int halo_index_top = (blockIdx.y - 1)*blockDim.y + threadIdx.y;
    int halo_index_bottom = (blockIdx.y + 1)*blockDim.y + threadIdx.y;
    // NOTES(ciroceissler): load the left halo (zero-fill outside the image)
    if (threadIdx.x >= blockDim.x - n) {
        if (halo_index_left >= 0) {
            image_ds[threadIdx.x - (blockDim.x - n)][threadIdx.y + n] = image_copy[(y * size_x) + halo_index_left];
        } else {
            image_ds[threadIdx.x - (blockDim.x - n)][threadIdx.y + n] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): load the right halo
    if (threadIdx.x < n) {
        if (halo_index_right < size_x) {
            image_ds[n + blockDim.x + threadIdx.x][threadIdx.y + n] = image_copy[(y * size_x) + halo_index_right];
        } else {
            image_ds[n + blockDim.x + threadIdx.x][threadIdx.y + n] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): load the top halo
    if (threadIdx.y >= blockDim.y - n) {
        if (halo_index_top >= 0) {
            image_ds[threadIdx.x + n][threadIdx.y - (blockDim.y - n)] = image_copy[(halo_index_top * size_x) + x];
        } else {
            image_ds[threadIdx.x + n][threadIdx.y - (blockDim.y - n)] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): load the bottom halo
    if (threadIdx.y < n) {
        if (halo_index_bottom < size_y) {
            image_ds[threadIdx.x + n][n + blockDim.y + threadIdx.y] = image_copy[(halo_index_bottom * size_x) + x];
        } else {
            image_ds[threadIdx.x + n][n + blockDim.y + threadIdx.y] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): load the top-left corner of the halo
    if (threadIdx.x >= blockDim.x - n && threadIdx.y >= blockDim.y - n) {
        if (halo_index_left >= 0 && halo_index_top >= 0) {
            image_ds[threadIdx.x - (blockDim.x - n)][threadIdx.y - (blockDim.y - n)] = image_copy[(halo_index_top * size_x) + halo_index_left];
        } else {
            image_ds[threadIdx.x - (blockDim.x - n)][threadIdx.y - (blockDim.y - n)] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): load the top-right corner of the halo
    if (threadIdx.x < n && threadIdx.y >= blockDim.y - n) {
        if (halo_index_right < size_x && halo_index_top >= 0) {
            image_ds[n + blockDim.x + threadIdx.x][threadIdx.y - (blockDim.y - n)] = image_copy[(halo_index_top * size_x) + halo_index_right];
        } else {
            image_ds[n + blockDim.x + threadIdx.x][threadIdx.y - (blockDim.y - n)] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): load the bottom-left corner of the halo
    if (threadIdx.x >= blockDim.x - n && threadIdx.y < n) {
        if (halo_index_left >= 0 && halo_index_bottom < size_y) {
            image_ds[threadIdx.x - (blockDim.x - n)][n + blockDim.y + threadIdx.y] = image_copy[(halo_index_bottom * size_x) + halo_index_left];
        } else {
            image_ds[threadIdx.x - (blockDim.x - n)][n + blockDim.y + threadIdx.y] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): load the bottom-right corner of the halo
    if (threadIdx.x < n && threadIdx.y < n) {
        if (halo_index_right < size_x && halo_index_bottom < size_y) {
            image_ds[n + blockDim.x + threadIdx.x][n + blockDim.y + threadIdx.y] = image_copy[(halo_index_bottom * size_x) + halo_index_right];
        } else {
            image_ds[n + blockDim.x + threadIdx.x][n + blockDim.y + threadIdx.y] = (PPMPixel) {0,0,0};
        }
    }
    // NOTES(ciroceissler): wait until the whole tile (and halo) is staged
    __syncthreads();
    total_red = 0;
    total_blue = 0;
    total_green = 0;
    // NOTES(ciroceissler): accumulate the MASK_WIDTH x MASK_WIDTH window
    // centred on this pixel (offsets in the tile already include the halo)
    for (int j = threadIdx.y; j < threadIdx.y + MASK_WIDTH; j++) {
        for (int i = threadIdx.x; i < threadIdx.x + MASK_WIDTH; i++) {
            total_red += image_ds[i][j].red ;
            total_blue += image_ds[i][j].blue ;
            total_green += image_ds[i][j].green ;
        }
    }
    // write the mean of the window for each channel
    image[(y * size_x) + x].red = total_red / (MASK_WIDTH*MASK_WIDTH);
    image[(y * size_x) + x].blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
    image[(y * size_x) + x].green = total_green / (MASK_WIDTH*MASK_WIDTH);
}

// Usage: <prog> input.ppm  (the smoothed image is written to stdout).
int main(int argc, char *argv[]) {
    // NOTE(review): on a bad argument count this only warns and then still
    // dereferences argv[1]; it should probably exit here.
    if( argc != 2 ) {
        printf("Too many or no one arguments supplied.\n");
    }
#ifdef __DEBUG__
    double t_start, t_end;
#endif // __DEBUG__
    char *filename = argv[1]; // input file path
    std::size_t size_image;
    PPMImage *image = readPPM(filename);
    // second load doubles as an output buffer of identical dimensions
    PPMImage *image_output = readPPM(filename);
    size_image = sizeof(PPMPixel)*image->x*image->y;
    // NOTES(ciroceissler): device buffers
    PPMPixel *d_image;
    PPMPixel *d_image_output;
    // NOTES(ciroceissler): launch dimensions (one thread per pixel)
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 dimGrid(ceil((float)image->x / BLOCK_SIZE), ceil((float)image->y / BLOCK_SIZE), 1);
#ifdef __DEBUG__
    t_start = rtclock();
#endif // __DEBUG__
    // NOTES(ciroceissler): allocate the device buffers
    cudaMalloc((void **) &d_image , size_image);
    cudaMalloc((void **) &d_image_output, size_image);
    // NOTES(ciroceissler): copy the input pixels to the device
    cudaMemcpy(d_image, image->data, size_image, cudaMemcpyHostToDevice);
    // NOTES(ciroceissler): run the kernel
    smoothing_kernel<<<dimGrid, dimBlock>>>(d_image_output, d_image, image->x, image->y);
    cudaDeviceSynchronize();
    // NOTES(ciroceissler): copy the result back from the GPU
    cudaMemcpy(image_output->data, d_image_output, size_image, cudaMemcpyDeviceToHost);
#ifdef __DEBUG__
    t_end = rtclock();
    fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
#endif // __DEBUG__
    writePPM(image_output);
    // NOTE(review): the pixel buffers (image->data) and the device buffers
    // are never freed; harmless at exit but a leak if this code is reused.
    free(image);
    free(image_output);
}

// ------------------------------------------------------------------------------------
// input    | cpu_serial | gpu_nosharedmemory | gpu_sharedmemory | speedup (cpu/gpusm)
// ------------------------------------------------------------------------------------
// arq1.ppm | 0.302824   | 0.667255           | 0.633555         | 0.4779758663
// arq2.ppm | 0.678099   | 0.755541           | 0.655261         | 1.034853287
// arq3.ppm | 2.710807   | 1.076326           | 0.751093         | 3.609149599
// ------------------------------------------------------------------------------------
//
// - numero de acesso a memoria global substituido por shared memory:
//
//     TILE_ACCESS = (BLOCK_SIZE*BLOCK_SIZE) * (MASK_WIDTH*MASK_WIDTH)
//
// - elementos carregados para cada TILE:
//
//     INPUT_ACCESS = (BLOCK_SIZE + MASK_WIDTH - 1)^2
//
// - reduction:
//
//     REDUCTION = TILE_ACCESS/INPUT_ACCESS
//
// ---------------------------------------------------------------
// MASK_WIDHT\BLOCK_SIZE |  8X8  | 14X14 | 15X15 | 16X16 | 32X32
// ---------------------------------------------------------------
//          5            | 11.11 | 15.12 | 15.58 | 16.00 | 19.75
//          7            | 16.00 | 24.01 | 25.00 | 25.92 | 34.75
//          9            | 20.25 | 32.80 | 34.45 | 36.00 | 51.84
//          11           | 23.90 | 41.17 | 43.56 | 45.82 | 70.24
//          13           | 27.04 | 49.00 | 52.16 | 55.18 | 89.39
// ---------------------------------------------------------------
//
// taf!
11,981
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime_api.h>

#define restrict __restrict__

#define PADDINGCLASS -2
#define OUTPUT_FILE "ocuda"
#define INPUT_FILE "data"
#define KMAX 20          /* hard upper bound on k (per-thread arrays are sized by it) */
#define CLASSESMAX 100   /* hard upper bound on the number of classes */
#define SPACEDIMMAX 100  /* hard upper bound on the space dimensionality */

void check_error(cudaError_t err, const char *msg);
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg);
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements);
void writeOutput(float* coords, int* classes, int spacedim, int numels);
__global__ void knn(float* const restrict coords, float2* restrict kOutput, const int totalElements, const int numels, const int spacedim, const int k, int* restrict classes, const int classes_num);
__global__ void knnPunisher(float2* restrict kOutput, int* restrict classes, const int numels, const int newels, const int k, const int classes_num);
__device__ float distance(float* const coords, float* const coords2, const int spacedim);
__device__ int insert(float* kPoints, float2 newDist, int* size, const int k, const int gid, const int offset);
__device__ void swap(float* x1, float* x2, float* y1, float* y2);
__device__ int deviceFindMode(int* kclasses, int classes_num, int k);
__device__ float distanceShm(float* coords, int left, int spacedim);

int main(int argc, char *argv[])
{
    int newels;          // number of points we want to classify
    int k;               // number of nearest points used for the vote
    int numels;          // number of already-classified points
    int spacedim;        // dimensionality of the coordinate space
    char filePath[255];  // path + filename of the input file
    int classes_num;     // number of classes
    float* h_coords;     // coordinates of all points (classified first, then new)
    int* h_classes;      // class label for each point (-1 / PADDINGCLASS for new)

    //*** device variables ***
    float* d_coords;
    int* d_classes;
    float2* d_kOutput;   // per-new-point list of the k best (distance, index) pairs

    //*** cudaEvent declarations (before_/after_download are created for
    //    symmetry but never recorded in this version) ***
    cudaEvent_t before_allocation, before_input, before_upload, before_knn, before_download;
    cudaEvent_t after_allocation, after_input, after_upload, after_knn, after_download;

    // Requirement: numels and newels must be greater than k.
    if (argc > 2) {
        // FIX: bounded copy instead of strcpy (argv[1] may exceed 255 bytes).
        strncpy(filePath, argv[1], sizeof(filePath) - 1);
        filePath[sizeof(filePath) - 1] = '\0';
        k = atoi(argv[2]);
    } else {
        printf("how-to-use: knn <inputfile> <k> \n");
        exit(1);
    }
    // FIX: the kernels index fixed-size arrays of KMAX entries with k, so an
    // unchecked k would corrupt thread-local memory.
    if (k < 1 || k > KMAX) {
        fprintf(stderr, "k must be in [1, %d]\n", KMAX);
        exit(1);
    }

    //*** cuda event init ***
    check_error(cudaEventCreate(&before_allocation), "create before_allocation cudaEvent");
    check_error(cudaEventCreate(&before_input), "create before_input cudaEvent");
    check_error(cudaEventCreate(&before_upload), "create before_upload cudaEvent");
    check_error(cudaEventCreate(&before_knn), "create before_knn cudaEvent");
    check_error(cudaEventCreate(&before_download), "create before_download cudaEvent");
    check_error(cudaEventCreate(&after_allocation), "create after_allocation cudaEvent");
    check_error(cudaEventCreate(&after_input), "create after_input cudaEvent");
    check_error(cudaEventCreate(&after_upload), "create after_upload cudaEvent");
    check_error(cudaEventCreate(&after_knn), "create after_knn cudaEvent");
    check_error(cudaEventCreate(&after_download), "create after_download cudaEvent");

    FILE *fp;
    if ((fp = fopen(filePath, "r")) == NULL) {
        printf("No such file\n");
        exit(1);
    }
    // Header line: numels,newels,classes_num,spacedim
    // FIX: validate the parse instead of ignoring fscanf's result.
    int count = fscanf(fp, "%d,%d,%d,%d\n", &numels, &newels, &classes_num, &spacedim);
    if (count != 4) {
        fprintf(stderr, "malformed header in %s\n", filePath);
        exit(1);
    }
    if (spacedim < 1 || spacedim > SPACEDIMMAX || classes_num < 1 || classes_num > CLASSESMAX) {
        fprintf(stderr, "spacedim/classes_num out of range\n");
        exit(1);
    }
    int totalElements = numels + newels;

    //*** allocation ***
    cudaEventRecord(before_allocation);
    h_coords = (float*) malloc(sizeof(float)*totalElements*spacedim);
    h_classes = (int*) malloc(sizeof(int)*totalElements);
    if (!h_coords || !h_classes) {
        fprintf(stderr, "host allocation failed\n");
        exit(1);
    }
    //*** device allocation (d_kOutput holds newels*KMAX float2 pairs) ***
    check_error(cudaMalloc(&d_coords, totalElements*spacedim*sizeof(float)), "alloc d_coords_x");
    check_error(cudaMalloc(&d_classes, totalElements*sizeof(int)), "alloc d_classes");
    check_error(cudaMalloc(&d_kOutput, newels*KMAX*2*sizeof(float)), "alloc d_kOutput");
    cudaEventRecord(after_allocation);

    //*** input from file ***
    cudaEventRecord(before_input);
    readInput(fp, h_coords, h_classes, spacedim, numels, totalElements);
    cudaEventRecord(after_input);
    fclose(fp);

    //*** copy arrays to the device ***
    cudaEventRecord(before_upload);
    check_error(cudaMemcpy(d_coords, h_coords, totalElements*spacedim*sizeof(float), cudaMemcpyHostToDevice), "copy d_coords");
    check_error(cudaMemcpy(d_classes, h_classes, totalElements*sizeof(int), cudaMemcpyHostToDevice), "copy d_classes");
    cudaEventRecord(after_upload);

    // Dynamic shared memory: 2*k floats per thread (k distances + k indices).
    int blockSize = 512;
    int numBlocks = (newels + blockSize - 1)/blockSize;
    cudaEventRecord(before_knn);
    knn<<<numBlocks, blockSize, (blockSize*sizeof(float)*2*k)>>>(d_coords, d_kOutput, totalElements, numels, spacedim, k, d_classes, classes_num);
    check_error(cudaGetLastError(), "launch knn");

    // Second pass resolves points whose neighbours were themselves new points.
    // NOTE(review): shared-memory size grows with newels and may exceed the
    // per-block limit for large inputs — verify against the target device.
    blockSize = 32;
    numBlocks = (newels + blockSize - 1)/blockSize;
    knnPunisher<<<numBlocks, blockSize, newels*sizeof(int)>>>(d_kOutput, d_classes, numels, newels, k, classes_num);
    check_error(cudaGetLastError(), "launch knnPunisher");
    cudaEventRecord(after_knn);

    check_error(cudaMemcpy(h_classes+numels, d_classes+numels, newels*sizeof(int), cudaMemcpyDeviceToHost), "download classes");
    check_error(cudaEventSynchronize(after_knn), "sync cudaEvents");
    printStats(before_knn, after_knn, "knn");

    writeOutput(h_coords, h_classes, spacedim, totalElements);

    // FIX: release resources (the original leaked everything).
    cudaFree(d_coords);
    cudaFree(d_classes);
    cudaFree(d_kOutput);
    free(h_coords);
    free(h_classes);
    cudaEventDestroy(before_allocation); cudaEventDestroy(after_allocation);
    cudaEventDestroy(before_input);      cudaEventDestroy(after_input);
    cudaEventDestroy(before_upload);     cudaEventDestroy(after_upload);
    cudaEventDestroy(before_knn);        cudaEventDestroy(after_knn);
    cudaEventDestroy(before_download);   cudaEventDestroy(after_download);
    return 0;
}

// Abort with a diagnostic if a CUDA call failed.
void check_error(cudaError_t err, const char *msg)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s : error %d (%s)\n", msg, err, cudaGetErrorString(err));
        exit(err);
    }
}

float runtime;

// Print the elapsed time between two recorded events.
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg)
{
    check_error(cudaEventElapsedTime(&runtime, before, after), msg);
    printf("%s %gms\n", msg, runtime);
}

// One thread per new point: build the k-nearest list over all classified
// points (and over earlier new points), then vote on the class. If any
// neighbour is itself a not-yet-classified new point, the class is marked -1
// and resolved later by knnPunisher. Dynamic shared memory: 2*k floats per
// thread, strided by blockDim.x (see insert()).
__global__ void knn(float* const restrict coords, float2* restrict kOutput, const int totalElements, const int numels, const int spacedim, const int k, int* restrict classes, const int classes_num)
{
    extern __shared__ float shm[];
    int gid = numels + threadIdx.x + blockIdx.x*blockDim.x;
    if (gid >= totalElements) return;
    float* newPointCoords = coords+spacedim*gid;
    float* pointCoords;
    int offset = blockDim.x;   // stride between consecutive slots of one thread
    int lid = threadIdx.x;
    int i = 0, size = 0, count = 0;
    // Cache this point's coordinates in registers/local memory.
    float point[SPACEDIMMAX];
    for (i = 0; i < spacedim; i++)
        point[i] = newPointCoords[i];
    float2 dist;
    pointCoords = coords;
    // Pass 1: distances against all already-classified points.
    for (i = 0; i < numels; i++) {
        dist = make_float2(distance(point, pointCoords, spacedim), i);
        insert(shm+lid, dist, &size, k, gid, offset);
        pointCoords += spacedim;
    }
    // Pass 2: distances against earlier new points; count how many of them
    // made it into the k-nearest list (their class may be undetermined).
    for (count=0; i < gid; i++) {
        dist = make_float2(distance(point, pointCoords, spacedim), i);
        count += insert(shm+lid, dist, &size, k, gid, offset);
        pointCoords += spacedim;
    }
    if (count > 0) {
        // Some neighbours are new points: defer to knnPunisher.
        classes[gid] = -1;
    } else {
        int kclasses[KMAX];
        for (int j = 0; j < k; j++)
            kclasses[j] = classes[(int)(shm[(j+k)*offset+lid])];
        classes[gid] = deviceFindMode(kclasses, classes_num, k);
    }
    // Copy this thread's k-nearest list (distances then indices) to kOutput.
    int newelId = gid-numels;
    for (i = 0; i < k; i++) {
        kOutput[newelId*KMAX + i].x = shm[i*offset + lid];
        kOutput[newelId*KMAX + i].y = shm[(i+k)*offset + lid];
    }
}

// Resolve new points whose class was left undetermined (-1) because some of
// their k neighbours were themselves new points. Iterates until this point's
// dependencies are all classified, then votes.
__global__ void knnPunisher(float2* restrict kOutput, int* restrict classes, const int numels, const int newels, const int k, const int classes_num)
{
    // Shared memory holds a per-new-point snapshot of the current classes.
    extern __shared__ int mPartial[];
    int gid = threadIdx.x + blockIdx.x*blockDim.x;
    // NOTE(review): threads that return here (and at the early return inside
    // the loop) skip the __syncthreads() executed by the remaining threads —
    // a divergent-barrier hazard. Left unchanged to preserve behavior;
    // verify on the target architecture.
    if (gid >= newels) return;
    int i, id, lid, kclasses[KMAX], kPoints[KMAX], count = 1;
    // If we get here, this point's class still has to be determined.
    for (i = 0; i < k; i++)
        kPoints[i] = kOutput[gid*KMAX+i].y;
    while (count != 0) {
        // Snapshot the classes of the new points up to (and including) gid.
        for (i = 0; i < gid; i++)
            mPartial[i] = classes[i+numels];
        mPartial[gid] = classes[gid+numels];
        if (mPartial[gid] != -1) return;   // already resolved by another pass
        __syncthreads();
        // Any dependency still undetermined?
        count = 0;
        for (i = k-1; i >= 0; i--) {
            id = kPoints[i];
            lid = id - numels;
            if (id > numels && mPartial[lid] < 0) {   // still undetermined
                count++;
                break;
            }
        }
        if (count == 0) {
            // All dependencies resolved: classify by majority vote.
            for (i = 0; i < k; i++)
                kclasses[i] = classes[kPoints[i]];
            classes[gid+numels] = deviceFindMode(kclasses, classes_num, k);
        }
    }
}

// Majority vote over kclasses. Ties are broken in favour of the class that
// appears first in kclasses (i.e. the nearest neighbour).
__device__ int deviceFindMode(int* kclasses, int classes_num, int k)
{
    int classesCount[CLASSESMAX];
    int i;
    int temp = 0;
    for (i = 0; i < CLASSESMAX; i++)
        classesCount[i] = 0;
    for (i = 0; i < k; i++) {
        temp = kclasses[i];
        classesCount[temp] += 1;
    }
    int max = 0;
    int maxValue = classesCount[0];
    for (i = 1; i < classes_num; i++) {
        int value = classesCount[i];
        if (value > maxValue) {
            max = i;
            maxValue = value;
        } else if (value != 0 && maxValue == value) {
            // Tie: keep whichever class occurs first in kclasses.
            int j = 0;
            for (j = 0; j < k; j++) {
                if (kclasses[j] == i) { max = i; break; }
                else if (kclasses[j] == max) break;
            }
        }
    }
    return max;
}

// Insertion-sort step into this thread's k-nearest list. kPoints points at
// the thread's first slot; entries are strided by `offset` (blockDim.x),
// distances at slots [0,k) and indices at slots [k,2k). Returns 1 if the
// new candidate entered the list.
__device__ int insert(float* kPoints, float2 newDist, int* size, const int k, const int gid, const int offset)
{
    int inserted = 0;
    if (*size == 0) {
        // Base case: insertion into an empty list.
        kPoints[0] = newDist.x;
        kPoints[k*offset] = newDist.y;
        *size = *size + 1;
        return 1;
    }
    int i = 1;
    float* valueX, *valueY, *tailX, *tailY;
    valueX = &(newDist.x);
    valueY = &(newDist.y);
    tailX = &(kPoints[(*size-i)*offset]);
    tailY = &(kPoints[(*size-i+k)*offset]);
    if (*size < k) {
        // Room left: append at the end, then bubble toward the front.
        kPoints[(*size)*offset] = newDist.x;
        kPoints[((*size)+k)*offset] = newDist.y;
        valueX = &(kPoints[(*size)*offset]);
        valueY = &(kPoints[((*size)+k)*offset]);
        inserted = 1;
    }
    // Starting from the tail, swap while the existing element is larger;
    // stop at the first smaller element.
    while (i <= *size && *(tailX) > *(valueX)) {
        swap(tailX, valueX, tailY, valueY);
        valueX = tailX;
        valueY = tailY;
        i++;
        tailX = &(kPoints[(*size-i)*offset]);
        tailY = &(kPoints[(*size-i+k)*offset]);
        inserted = 1;
    }
    if (inserted && *size < k)
        *size = *size + 1;
    return inserted;
}

// Swap two (distance, index) pairs in place.
__device__ void swap(float* x1, float* x2, float* y1, float* y2)
{
    float tmp;
    tmp = *(x1); *(x1) = *(x2); *(x2) = tmp;
    tmp = *(y1); *(y1) = *(y2); *(y2) = tmp;
}

// Read coordinates (and classes for the first numels points) from the file.
// Points beyond numels carry a literal "-1" class marker in the file.
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements)
{
    int i, j;
    int count;
    for (i = 0; i < numels; i++) {
        for (j = 0; j < spacedim; j++)
            count = fscanf(file, "%f,", &(coords[i*spacedim +j]));
        count = fscanf(file, "%d\n", &(classes[i]));
    }
    for (; i < totalElements; i++) {
        for (j = 0; j < spacedim; j++)
            count = fscanf(file, "%f,", &(coords[i*spacedim+j]));
        count = fscanf(file, "-1\n");
    }
    (void)count;   // count is kept only to silence unused-result warnings
}

// Write all points and their (now complete) class labels to OUTPUT_FILE.
void writeOutput(float* coords, int* classes, int spacedim, int numels)
{
    FILE *fp;
    fp = fopen(OUTPUT_FILE, "w");
    if (fp == NULL) {
        fprintf(stderr, "cannot open %s for writing\n", OUTPUT_FILE);
        return;
    }
    int i, j;
    for (i = 0; i < numels; i++) {
        for (j = 0; j < spacedim; j++)
            fprintf(fp, "%lf,", coords[i*spacedim+j]);
        fprintf(fp, "%d\n", classes[i]);
    }
    fclose(fp);
}

// Multidimensional squared Euclidean distance (sqrt omitted: it is monotone,
// so rankings are unchanged).
__device__ float distance(float* const coords, float* const coords2, const int spacedim)
{
    float sum = 0;
    int i;
    for (i = 0; i < spacedim; i++) {
        float diff = coords[i] - coords2[i];
        sum += diff*diff;
    }
    return sum;
}
11,982
#include <stdio.h>
#include <string.h>

// Record, per thread, its block index, thread index, warp index within the
// block, flattened global id, and the SM clock at completion. One slot per
// global thread id; threads past N write nothing.
__global__ void what_is_my_id(int N, unsigned int * const block, unsigned int * const thread, unsigned int * const warp, unsigned int * const calc_thread, unsigned int * const completionTime)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    // FIX: cast avoids a signed/unsigned comparison warning; the guard itself
    // is required because the launch (2 blocks x 37 threads) overshoots N.
    if (thread_idx >= (unsigned int)N) {
        return;
    }
    block[thread_idx] = blockIdx.x;
    thread[thread_idx] = threadIdx.x;
    warp[thread_idx] = threadIdx.x / warpSize;
    calc_thread[thread_idx] = thread_idx;
    completionTime[thread_idx] = clock();
}

// Print one labelled array, 16 values per row.
void formatPrinter(const char* matName, int N, unsigned int * const mat)
{
    printf("%s:", matName);
    for (int i = 0; i < N; i++) {
        if (i % 16 == 0) {
            printf("\n");
        }
        // FIX: the values are unsigned int, so %u (not %d) is the matching
        // conversion specifier.
        printf("%u\t", mat[i]);
    }
    printf("\n\n");
}

// Print all five recorded arrays with their labels.
void showResult(int N, unsigned int * const block, unsigned int * const thread, unsigned int * const warp, unsigned int * const calc_thread, unsigned int * const completionTime)
{
    const char *colors[5] = {"block", "thread", "warp", "calc_thread", "completionTime"};
    formatPrinter(colors[0], N, block);
    formatPrinter(colors[1], N, thread);
    formatPrinter(colors[2], N, warp);
    formatPrinter(colors[3], N, calc_thread);
    formatPrinter(colors[4], N, completionTime);
}

int main()
{
    int deviceId;
    int numberOfSMs;

    // Queried for reference; the launch configuration below is fixed.
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);

    const int N = 1 << 6;
    // FIX: size the buffers by the element type actually stored
    // (unsigned int); the unused `limN` local was removed.
    size_t size = N * sizeof(unsigned int);

    unsigned int *block;
    unsigned int *thread;
    unsigned int *warp;
    unsigned int *calc_thread;
    unsigned int *completionTime;

    cudaMallocManaged(&block, size);
    cudaMallocManaged(&thread, size);
    cudaMallocManaged(&warp, size);
    cudaMallocManaged(&calc_thread, size);
    cudaMallocManaged(&completionTime, size);

    cudaError_t asyncErr;

    // Deliberately odd launch (2 x 37 = 74 threads for N = 64) to show the
    // bounds guard at work.
    what_is_my_id<<<2, 37>>>(N, block, thread, warp, calc_thread, completionTime);

    asyncErr = cudaDeviceSynchronize();
    if (asyncErr != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(asyncErr));

    showResult(N, block, thread, warp, calc_thread, completionTime);

    cudaFree(block);
    cudaFree(thread);
    cudaFree(warp);
    cudaFree(calc_thread);
    cudaFree(completionTime);
}
11,983
#include "includes.h"

// Set every element of `field` (length n) to zero; one thread per element.
__global__ void zeros(double *field, int n)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) {
        return;   // tail threads past the array do nothing
    }
    field[idx] = 0.0;
}
11,984
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>

#define BLOCK_SIZE 256
#define STR_SIZE 256
#define HALO 1 // halo width along one direction when advancing to the next iteration

#define BENCH_PRINT

void run(int argc, char** argv);

long rows, cols;
int* data;

#define M_SEED 9

// Create <folder>/data.mem sized rows*cols ints, mmap it, and fill it with
// deterministic pseudo-random values in [0, 10).
void init(int argc, char** argv)
{
    char *folder;
    char *filepath;

    if (argc == 4) {
        cols = atol(argv[1]);
        rows = atol(argv[2]);
        folder = argv[3];
    } else {
        printf("Usage: %s row_len col_len folder\n", argv[0]);
        exit(0);
    }

    filepath = (char *)malloc(sizeof(char) * (strlen(folder) + 128));
    if (!filepath) {
        fprintf(stderr, "Cannot allocate filepath");
        exit(EXIT_FAILURE);
    }
    sprintf(filepath, "%s/data.mem", folder);

    FILE *fp;
    if ((fp = fopen(filepath, "w+")) == 0) {
        fprintf(stderr, "%s was not opened\n", filepath);
        exit(EXIT_FAILURE);
    }
    // Grow the file to its final size so the mapping is fully backed.
    if (ftruncate(fileno(fp), sizeof(int) * rows * cols) != 0) {
        fprintf(stderr, "error: can not truncate %s\n", filepath);
        perror("ftruncate");
        exit(EXIT_FAILURE);
    }

    data = (int *)mmap(NULL, sizeof(int) * rows * cols,
                       PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_NORESERVE, fileno(fp), 0);
    // BUG FIX: mmap signals failure with MAP_FAILED ((void *)-1), not NULL,
    // so the original `if (!data)` check could never catch an error.
    if (data == MAP_FAILED) {
        fprintf(stderr, "Cannot mmap %s.\n", filepath);
        perror("mmap");
        exit(EXIT_FAILURE);
    }

    int seed = M_SEED;   // fixed seed => reproducible data
    srand(seed);
    for (long i = 0; i < rows; i++) {
        for (long j = 0; j < cols; j++) {
            data[i * cols + j] = rand() % 10;
        }
    }

    fflush(fp);
    fclose(fp);   // the mapping stays valid after the FILE* is closed
    free(filepath);
}

int main(int argc, char** argv)
{
    init(argc, argv);
    // Unmap before exit; MAP_SHARED writes are flushed to the backing file.
    munmap(data, sizeof(int) * rows * cols);
    return EXIT_SUCCESS;
}
11,985
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>   // FIX: fabs() used in the verification loop

__global__ void vectorAdd(float *, float *, float *, int);

// Element-wise vector addition: C[i] = A[i] + B[i] for i < n.
__global__ void vectorAdd(float *A, float *B, float *C, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n) {
        C[i] = A[i] + B[i];
    }
    return;
}

// Host driver: allocate device buffers, copy inputs, launch the kernel,
// copy the result back and verify it against a CPU reference.
void vecAdd(float *h_A, float *h_B, float *h_C, int n)
{
    int size = n * sizeof(float);
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;

    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    err = cudaMalloc((void **) &d_A, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **) &d_B, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **) &d_C, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;   // ceil-div
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, n);
    // Kernels have no return value; launch failures surface via
    // cudaGetLastError().
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // BUG FIX: this message described a host-to-device copy, but the copy
    // below is device-to-host.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Verify that the result vector is correct (tolerance-based compare).
    for (int i = 0; i < n; ++i) {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");
    return;
}

int main()
{
    int n;
    float *h_A, *h_B, *h_C;
    int i;

    printf("Enter the number of elements: ");
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "Invalid element count!\n");
        return EXIT_FAILURE;
    }

    h_A = (float *) malloc(sizeof(float) * n);
    h_B = (float *) malloc(sizeof(float) * n);
    h_C = (float *) malloc(sizeof(float) * n);
    if (!h_A || !h_B || !h_C) {
        fprintf(stderr, "Host allocation failed!\n");
        return EXIT_FAILURE;
    }

    srand(time(0));
    for (i = 0; i < n; ++i) {
        h_A[i] = rand();
        h_B[i] = rand();
    }

    vecAdd(h_A, h_B, h_C, n);

    // FIX: the original leaked all three host buffers.
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
11,986
#include "includes.h"

// Exchange each lower-half individual (6 floats per gene) and its fitness
// with a partner in the upper half of the population. Thread idx in
// [0, nHalf) pairs element idx with j = nHalf + (idx + Offset) % nHalf.
// `rSeed` is currently unused.
__global__ void shuffleGene(float *gene, float *fit, float *rSeed, int* metaData)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int nGene = metaData[1];   // population size
    int nHalf = nGene / 2;
    // BUG FIX: the guard was `idx > nHalf`, which let thread idx == nHalf
    // run and swap a pair that lies entirely inside the upper half.
    if (idx >= nHalf) return;
    int Offset = int(nHalf / 5.3);
    int j = nHalf + (idx + Offset) % nHalf;
    // Swap the six floats of the paired genes.
    for (int k = 0; k < 6; k++) {
        float t = gene[idx*6 + k];
        gene[idx*6 + k] = gene[j*6 + k];
        gene[j*6 + k] = t;
    }
    // BUG FIX: the fitness swap used to sit inside the loop above and so
    // executed six times — an even count, leaving fit[idx] and fit[j]
    // unchanged. Swap exactly once per pair.
    float t = fit[idx];
    fit[idx] = fit[j];
    fit[j] = t;
}
11,987
#include <sys/time.h>

// Elapsed wall-clock time from t0 to t1, in milliseconds.
float timedifference_msec(struct timeval t0, struct timeval t1)
{
    const float whole_ms = (t1.tv_sec - t0.tv_sec) * 1000.0f;
    const float frac_ms  = (t1.tv_usec - t0.tv_usec) / 1000.0f;
    return whole_ms + frac_ms;
}
11,988
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError (const char *msg);

// Minimal C++ class — present only to demonstrate mixing C++ host code with
// CUDA in a single translation unit.
class A
{
public:
  int a;
  float b;

public:
  int get_a ();
  void set_a (int val);
};

int A::get_a ()
{
  return this->a;
}

void A::set_a (int val)
{
  this->a = val;
}

// Part 3 of 5: each thread writes its flattened global id into d_a.
// NOTE(review): no bounds guard — d_a must hold exactly
// gridDim.x * blockDim.x ints.
__global__ void myFirstKernel(int *d_a)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  d_a[idx] = idx;
}

// Tree reduction (sum) in dynamic shared memory; thread 0 of each block
// writes the block's partial sum to odata[blockIdx.x]. The halving loop
// only covers all elements when blockDim.x is a power of two.
// (Unused by this program's main paths.)
__global__ void reduce(float *idata, float *odata)
{
  extern __shared__ float sdata[];
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  sdata[tid] = idata[i];
  __syncthreads();
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
      sdata[tid] += sdata[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) {
    odata[blockIdx.x] = sdata[0];
  }
}

// Launch myFirstKernel on an 8x8 grid/block, copy the result back and verify
// every slot holds its own index.
int cuda_test_1 (int argc, char** argv)
{
  int *h_a;   // pointer for host memory
  int *d_a;   // pointer for device memory

  // define grid and block size
  int numBlocks = 8;
  int numThreadsPerBlock = 8;

  // Part 1 of 5: allocate host and device memory
  size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
  h_a = (int *) malloc(memSize);
  cudaMalloc((void **) &d_a, memSize);
  checkCUDAError("cudaMalloc");

  // Part 2 of 5: launch kernel
  dim3 dimGrid(numBlocks, 1, 1);
  dim3 dimBlock(numThreadsPerBlock, 1, 1);
  myFirstKernel<<<dimGrid, dimBlock>>>(d_a);

  // Block until the device has completed.
  // FIX: cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
  // cudaDeviceSynchronize() is the supported equivalent.
  cudaDeviceSynchronize();

  // check if kernel execution generated an error
  checkCUDAError("kernel execution");

  // Part 4 of 5: device to host copy
  cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
  checkCUDAError("cudaMemcpy");

  // Part 5 of 5: verify the data returned to the host is correct
  for (int i = 0; i < numBlocks; i++) {
    for (int j = 0; j < numThreadsPerBlock; j++) {
      assert (h_a[i * numThreadsPerBlock + j] == i * numThreadsPerBlock + j);
    }
  }

  cudaFree(d_a);
  free(h_a);

  // If the program makes it this far, then the results are correct and
  // there are no run-time errors. Good work!
  printf("Correct!\n");
  return 0;
}

void checkCUDAError (const char *msg)
{
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf (stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString (err));
    exit(-1);
  }
}

// Repeatedly allocate and free four device buffers of growing size to probe
// how much device memory can be claimed.
int cuda_mem_test (int argc, char** argv)
{
  void *test[4];
  int alloc_mb;
  // FIX: size_t, so `alloc_mb * MB` is computed in size_t and cannot
  // overflow int (the original commented-out loop reached 1024*1024 MB).
  const size_t MB = (1024*1024);

  //for (alloc_mb = 1024; alloc_mb <= 1024*1024*1024; alloc_mb *= 2) {
  for (alloc_mb = 100; alloc_mb <= 1100; alloc_mb += 100) {
    printf ("Alloc = 4 x %d MB = %d MB\n", alloc_mb, 4*alloc_mb);
    cudaMalloc ((void**) &test[0], alloc_mb * MB);
    checkCUDAError ("cudaMalloc[0]");
    cudaMalloc ((void**) &test[1], alloc_mb * MB);
    checkCUDAError ("cudaMalloc[1]");
    cudaMalloc ((void**) &test[2], alloc_mb * MB);
    checkCUDAError ("cudaMalloc[2]");
    cudaMalloc ((void**) &test[3], alloc_mb * MB);
    checkCUDAError ("cudaMalloc[3]");
    cudaFree (test[0]);
    checkCUDAError ("cudaFree");
    cudaFree (test[1]);
    checkCUDAError ("cudaFree");
    cudaFree (test[2]);
    checkCUDAError ("cudaFree");
    cudaFree (test[3]);
    checkCUDAError ("cudaFree");
  }
  return 0;
}

int main (int argc, char** argv)
{
  // do some c++ here
  A a1;
  a1.set_a (32);
  printf ("C++ test: %d\n", a1.get_a());

  // cuda_test_1 (argc, argv);
  return cuda_mem_test (argc, argv);
}
11,989
/*
 * Column-wise matrix normalization on the GPU using CUDA:
 * B[:,col] = (A[:,col] - mean(A[:,col])) / stddev(A[:,col]).
 *
 * input:  N : the matrix size (required)
 *         random seed : used for initial matrix values (optional)
 *
 * __global__ void kernel(float *d_A, float *d_B, int n) does the work on
 * the GPU: d_A carries the input from host to device, d_B carries the
 * result back.
 *
 * @date : 3/9/2014
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>

/* Program Parameters */
#define MAXN 8000 /* Max value of N */

/* Check a cudaError_t stored in `res` and abort with location info. */
#define CHECK_ERR(x) \
    if (x != cudaSuccess) { \
        fprintf(stderr,"%s in %s at line %d\n", \
                cudaGetErrorString(res),__FILE__,__LINE__); \
        exit(-1); \
    } \

int N; /* Matrix size */

/* Matrices */
float A[MAXN][MAXN], B[MAXN][MAXN];

/* junk */
#define randm() 4|2[uid]&3

/* Prototype */
void matrixNorm();

/* returns a seed for srand based on the time */
unsigned int time_seed()
{
    struct timeval t;
    struct timezone tzdummy;
    gettimeofday(&t, &tzdummy);
    return (unsigned int)(t.tv_usec);
}

/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv)
{
    int seed = 0;  /* Random seed */
    char uid[32];  /* User name (only referenced by the randm() macro) */

    /* Read command-line arguments */
    srand(time_seed());  /* Randomize */
    if (argc == 3) {
        seed = atoi(argv[2]);
        srand(seed);
        printf("Random seed = %i\n", seed);
    }
    if (argc >= 2) {
        N = atoi(argv[1]);
        if (N < 1 || N > MAXN) {
            printf("N = %i is out of range.\n", N);
            exit(0);
        }
    } else {
        printf("Usage: %s <matrix_dimension> [random seed]\n", argv[0]);
        exit(0);
    }

    /* Print parameters */
    printf("\nMatrix dimension N = %i.\n", N);
}

/* Initialize A with random values and B with zeros */
void initialize_inputs()
{
    int row, col;

    printf("\nInitializing...\n");
    for (col = 0; col < N; col++) {
        for (row = 0; row < N; row++) {
            A[row][col] = (float)rand() / 32768.0;
            B[row][col] = 0.0;
        }
    }
}

/* Print input matrix (small N only) */
void print_inputs()
{
    int row, col;

    if (N < 10) {
        printf("\nA =\n\t");
        for (row = 0; row < N; row++) {
            for (col = 0; col < N; col++) {
                printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t");
            }
        }
    }
}

/* Print result matrix (small N only) */
void print_B()
{
    int row, col;

    if (N < 10) {
        printf("\nB =\n\t");
        for (row = 0; row < N; row++) {
            for (col = 0; col < N; col++) {
                printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
            }
        }
    }
}

/* One thread per column: compute the column mean and standard deviation,
 * then write the normalized column into d_B. Matrices are stored row-major,
 * so element (i, col) lives at d_A[col + i*n]. */
__global__ void kernel(float *d_A, float *d_B, int n)
{
    /* BUG FIX: use the global thread index as the column number (the
     * original used threadIdx.x only, so just columns 0..blockDim.x-1 were
     * ever normalized, redundantly by every block), and guard on it. */
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    /* BUG FIX: mu and sigma were read uninitialized before. */
    float mu = 0.0f;
    float sigma = 0.0f;
    int i;

    if (col < n) {
        for (i = 0; i < n; i++) {
            mu += d_A[col + i*n];
        }
        mu /= n;
        for (i = 0; i < n; i++) {
            float diff = d_A[col + i*n] - mu;
            sigma += diff * diff;
        }
        sigma /= n;
        sigma = sqrtf(sigma);
        for (i = 0; i < n; i++) {
            if (sigma == 0.0f) {
                /* Constant column: define its normalization as all zeros. */
                d_B[col + i*n] = 0.0f;
            } else {
                d_B[col + i*n] = (d_A[col + i*n] - mu) / sigma;
            }
        }
    }
}

int main(int argc, char **argv)
{
    /* Timing variables */
    struct timeval etstart, etstop;  /* Elapsed times using gettimeofday() */
    struct timezone tzdummy;
    clock_t etstart2, etstop2;       /* Elapsed times using times() */
    unsigned long long usecstart, usecstop;
    struct tms cputstart, cputstop;  /* CPU times for my processes */

    /* Process program parameters */
    parameters(argc, argv);

    /* Initialize A and B */
    initialize_inputs();

    /* Print input matrices */
    print_inputs();

    /* Start Clock */
    printf("\nStarting clock.\n");
    gettimeofday(&etstart, &tzdummy);
    etstart2 = times(&cputstart);

    /* Matrix normalization */
    matrixNorm();

    /* Stop Clock */
    gettimeofday(&etstop, &tzdummy);
    etstop2 = times(&cputstop);
    printf("Stopped clock.\n");
    usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
    usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;

    /* Display output */
    print_B();

    /* Display timing results */
    printf("\n Total Elapsed time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
    printf("(CPU times are accurate to the nearest %g ms)\n", 1.0/(float)CLOCKS_PER_SEC * 1000.0);
    printf("My total CPU time for parent = %g ms.\n",
           (float)( (cputstop.tms_utime + cputstop.tms_stime) -
                    (cputstart.tms_utime + cputstart.tms_stime) ) / (float)CLOCKS_PER_SEC * 1000);
    printf("My system CPU time for parent = %g ms.\n",
           (float)(cputstop.tms_stime - cputstart.tms_stime) / (float)CLOCKS_PER_SEC * 1000);
    printf("My total CPU time for child processes = %g ms.\n",
           (float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
                    (cputstart.tms_cutime + cputstart.tms_cstime) ) / (float)CLOCKS_PER_SEC * 1000);
    /* Contrary to the man pages, this appears not to include the parent */
    printf("--------------------------------------------\n");

    exit(0);
}

/* Flatten A, run the normalization kernel (one thread per column), and copy
 * the result back into B. */
void matrixNorm()
{
    cudaError_t res;
    float *d_A, *d_B;
    float *h_A, *h_B;
    int i, j;
    int size = N*N*sizeof(float);

    h_A = (float*) malloc(size);
    h_B = (float*) malloc(size);

    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            h_A[i*N + j] = A[i][j];
        }
    }

    /* allocate memory for d_A and d_B on the device */
    res = cudaMalloc((void **) &d_A, size);
    CHECK_ERR(res);
    res = cudaMalloc((void **) &d_B, size);
    CHECK_ERR(res);

    /* copy input to the device; FIX: zero d_B on the device instead of
     * copying the uninitialized h_B buffer up (undefined behavior). */
    res = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    CHECK_ERR(res);
    res = cudaMemset(d_B, 0, size);
    CHECK_ERR(res);

    /* calling the kernel: one thread per COLUMN, so the grid is sized by N
     * (the original launched N*N/threads blocks, which with the fixed kernel
     * would merely waste blocks). */
    int threadPerBlock = 128;
    int blockPerGrid = (N + threadPerBlock - 1) / threadPerBlock;
    kernel<<<blockPerGrid, threadPerBlock>>>(d_A, d_B, N);
    res = cudaGetLastError();
    CHECK_ERR(res);

    res = cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
    CHECK_ERR(res);

    /* free device memory */
    cudaFree(d_A);
    cudaFree(d_B);

    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            B[i][j] = h_B[i*N+j];
        }
    }

    /* FIX: the original leaked both host staging buffers. */
    free(h_A);
    free(h_B);
}
11,990
#include <iostream>
#include <math.h>

#define NUM_INPUTS 4
#define NUM_OUTPUTS 3

// Matrix-vector product u = M * v, where M is `outputs` x `inputs` stored
// row-major. Threads grid-stride over output rows.
__global__ void dot(int outputs, int inputs, float *M, float *v, float *u)
{
    const int first = threadIdx.x;
    const int step = blockDim.x;
    for (int row = first; row < outputs; row += step) {
        u[row] = 0.0f;
        for (int col = 0; col < inputs; col++) {
            u[row] += M[col + inputs * row] * v[col];
        }
    }
}

// In-place element-wise addition: u[i] += v[i] for i < n.
__global__ void add(int n, float *u, float *v)
{
    const int first = threadIdx.x;
    const int step = blockDim.x;
    for (int i = first; i < n; i += step) {
        u[i] = u[i] + v[i];
    }
}

// Forward pass of a single dense layer: output = weights * inputs + bias.
int main()
{
    float *inputs, *weights, *bias, *output;
    cudaMallocManaged(&inputs, NUM_INPUTS * sizeof(float));
    cudaMallocManaged(&weights, NUM_INPUTS * NUM_OUTPUTS * sizeof(float));
    cudaMallocManaged(&bias, NUM_OUTPUTS * sizeof(float));
    cudaMallocManaged(&output, NUM_OUTPUTS * sizeof(float));

    // Fixed demo values for one input vector, a 3x4 weight matrix, and the
    // per-output biases.
    const float in_vals[NUM_INPUTS] = {1.0f, 2.0f, 3.0f, 2.5f};
    const float w_vals[NUM_OUTPUTS][NUM_INPUTS] = {
        { 0.2f,   0.8f,  -0.5f,  1.0f },
        { 0.5f,  -0.91f,  0.26f, -0.5f },
        {-0.26f, -0.27f,  0.17f,  0.87f},
    };
    const float b_vals[NUM_OUTPUTS] = {2.0f, 3.0f, 0.5f};

    for (int i = 0; i < NUM_INPUTS; i++) {
        inputs[i] = in_vals[i];
    }
    for (int row = 0; row < NUM_OUTPUTS; row++) {
        bias[row] = b_vals[row];
        // weights is indexed as [col, row] = [col + NUM_INPUTS*row]
        for (int col = 0; col < NUM_INPUTS; col++) {
            weights[col + NUM_INPUTS * row] = w_vals[row][col];
        }
    }

    dot<<<1, NUM_OUTPUTS>>>(NUM_OUTPUTS, NUM_INPUTS, weights, inputs, output);
    add<<<1, NUM_OUTPUTS>>>(NUM_OUTPUTS, output, bias);
    cudaDeviceSynchronize();

    std::cout << "[" << output[0] << ", " << output[1] << ", " << output[2] << "]" << std::endl;

    cudaFree(inputs);
    cudaFree(weights);
    cudaFree(bias);
    cudaFree(output);
}
11,991
#include <stdio.h>
#include <stdlib.h>
#include <thrust/sort.h>

#define thread 512

// Rank sort: each thread takes one element, counts how many elements rank
// before it (strictly smaller, or equal with a lower index for a stable
// tie-break), and writes its element at that final position.
__global__ void rank_sort(int *data, int *result)
{
    const int i = threadIdx.x;
    const int key = data[i];
    int rank = 0;
    for (int j = 0; j < thread; j++) {
        if (key > data[j] || (key == data[j] && j < i)) {
            rank += 1;
        }
    }
    result[rank] = key;
}

int main(int argc, char *argv[])
{
    int *d_in, *d_out;
    int host_input[thread], host_sorted[thread];
    const int bytes = sizeof(int) * thread;

    // Deterministic pseudo-random input in [0, 100).
    srand(123);
    printf(" Generate Ok\n");
    for (int i = 0; i < thread; i++) {
        host_input[i] = rand() % 100;
        printf("%d ", host_input[i]);
    }
    printf(" \n Working Ok\n");

    cudaMalloc((void**) &d_in, bytes);
    cudaMalloc((void**) &d_out, bytes);
    printf(" Malloc Ok\n");

    cudaMemcpy(d_in, host_input, bytes, cudaMemcpyHostToDevice);
    printf(" Copy Ok\n");

    // One block, one thread per element.
    rank_sort<<<1, thread>>>(d_in, d_out);
    printf(" Function Ok\n");

    cudaMemcpy(host_sorted, d_out, bytes, cudaMemcpyDeviceToHost);
    printf(" Copy Back Ok\n");

    printf(" Sorted Data \n");
    for (int i = 0; i < thread; i++) {
        printf("%d ", host_sorted[i]);
    }
    printf("\n");
    printf(" Sorted OK \n");

    cudaFree(d_out);
    cudaFree(d_in);
    return 0;
}
11,992
#include <stdio.h>

// Element-wise accumulate: b[i] += a[i]. No bounds guard, so the launch
// configuration must supply exactly one thread per element.
__global__ void mean(int *a, int *b)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    b[id] += a[id];
}

int main()
{
    int a[100], b[100];
    int i, sum = 0;
    int *dev_a, *dev_b;

    for (i = 0; i < 100; i++) {
        a[i] = 1;
        b[i] = 1;
    }

    printf("\n\t Printing Arrays : ");
    printf("Array A");
    for (i = 0; i < 100; i++) {
        printf("\n\t %d", a[i]);
    }
    printf("Array B");
    for (i = 0; i < 100; i++) {
        printf("\n\t %d", b[i]);
    }

    cudaMalloc(&dev_a, 100*sizeof(int));
    cudaMalloc(&dev_b, 100*sizeof(int));
    cudaMemcpy(dev_a, a, 100*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, 100*sizeof(int), cudaMemcpyHostToDevice);

    // One block of 100 threads — matches the arrays exactly.
    mean<<<1, 100>>>(dev_a, dev_b);

    // BUG FIX: copy into the array itself, not &b (type int(*)[100]) — the
    // original only worked because the two addresses happen to coincide.
    cudaMemcpy(b, dev_b, 100*sizeof(int), cudaMemcpyDeviceToHost);

    for (i = 0; i < 100; i++) {
        sum += b[i];
    }
    printf("\n\tSum = %d", sum);
    printf("\n\tMean = %d", sum/100);   // integer mean, as before

    // FIX: release the device buffers (the original leaked both).
    cudaFree(dev_a);
    cudaFree(dev_b);

    return 0;
}
11,993
//Pattern Match Program using CUDA
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// One thread per candidate start offset: compares the pattern against the
// sequence at offset idx and atomically increments *results on a full match.
// Launch must cover at least seqLength threads.
__global__ void match(char* pattern, char* sequence, int* results, int patLength, int seqLength){
    //get global thread id = candidate start offset
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    //only offsets where the whole pattern still fits are valid
    if (idx <= seqLength-patLength){
        for(int i = 0; i < patLength; i++){
            if (pattern[i] != sequence[idx+i])
                //if a mismatch is found, exit.
                return;
        }
        atomicAdd(results,1); //match has been found so we add 1 to results
    }
}

int main(){
    //device variables
    char* d_pattern;  //device pattern
    char* d_sequence; //device sequence
    int* d_results;   //device variable to store results

    //host variables
    int h_results; //host variable to store results
    const char* h_pattern = "GGATCGA";
    const char* h_sequence = "GAATTGAATTCAGGATCGAGTTACAGTTAAATTCAGTTACGGATCGAAGTTA\n\
AGTTAAGTTAGAATATTCAGTGGATCGATACAGTTAAATTCAGTTACACAGT\n\
TGGATCGAAAGTTAAGTTAGAATATTCAGTTAGGAATTCAGGGATCGATTAC\n\
AGTTAAATTCAGTTTTAAGTTAATCAGTTAC";
    int h_patLength = (int)strlen(h_pattern);  //length of pattern
    int h_seqLength = (int)strlen(h_sequence); //length of sequence

    //Memory allocation of device variables
    cudaMalloc((void**)&d_pattern, h_patLength*sizeof(char));
    cudaMalloc((void**)&d_sequence, h_seqLength*sizeof(char));
    cudaMalloc((void**)&d_results, sizeof(int));

    //BUG FIX: cudaMalloc does not zero memory, so without this memset the
    //atomicAdd tally starts from garbage and the printed count is undefined.
    cudaMemset(d_results, 0, sizeof(int));

    //Copy Host memory to Device Memory
    cudaMemcpy(d_pattern,h_pattern, h_patLength*sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_sequence,h_sequence, h_seqLength*sizeof(char), cudaMemcpyHostToDevice);

    //Launch Kernel: a grid with one block and seqLength threads.
    match<<<1,h_seqLength>>>(d_pattern,d_sequence,d_results,h_patLength, h_seqLength);

    //copy device results to host results (blocking copy also synchronizes)
    cudaMemcpy(&h_results, d_results, sizeof(int), cudaMemcpyDeviceToHost);

    //print results
    printf("Total number of matches: %d\n", h_results);

    //free device memory
    cudaFree(d_pattern);
    cudaFree(d_sequence);
    cudaFree(d_results);
    return 0;
}
11,994
#include<iostream>
#include<ctime>
using namespace std;

// Empty macro; presumably a leftover marker for an -O3 build variant — confirm.
#define O3

// Unrolled in-place block reduction computing a PRODUCT (note: *=, not +=).
// Each block owns a window of 8*blockDim.x elements of `a`; every thread first
// folds its 8 strided elements into a[idx], then the block tree-reduces the
// window (viewed through `idata`) down to idata[0], published to b[blockIdx.x].
// BlockSize is the compile-time blockDim.x used to prune unrolled stages.
// NOTE(review): the final warp stage uses the volatile, barrier-free
// warp-synchronous idiom — valid only pre-Volta; Volta+ needs __syncwarp().
// NOTE(review): the guard below skips a partially-filled tail window entirely
// rather than reducing its valid prefix.
template<unsigned int BlockSize>
__global__ void add(int*a,int*b,unsigned int n)
{
    unsigned int tid = threadIdx.x;
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x * 8;
    // Base of this block's 8*blockDim.x window.
    int *idata = a + blockIdx.x * blockDim.x*8;
    // Fold 8 grid-strided elements into a[idx] (8x unroll for ILP).
    if(idx + 7*blockDim.x < n)
    {
        a[idx] *= a[idx + blockDim.x];
        a[idx] *= a[idx + 2*blockDim.x];
        a[idx] *= a[idx + 3*blockDim.x];
        a[idx] *= a[idx + 4*blockDim.x];
        a[idx] *= a[idx + 5*blockDim.x];
        a[idx] *= a[idx + 6*blockDim.x];
        a[idx] *= a[idx + 7*blockDim.x];
    }
    //if(idx + blockDim.x < n) a[idx] += a[idx + blockDim.x];
    __syncthreads();
    // Tree reduction; stages above the actual BlockSize compile away.
    if(BlockSize >= 1024 && tid < 512)
    {
        idata[tid] *= idata[tid + 512];
    }
    __syncthreads();
    if(BlockSize >= 512 && tid < 256)
    {
        idata[tid] *= idata[tid + 256];
    }
    __syncthreads();
    if(BlockSize >= 256 && tid < 128)
    {
        idata[tid] *= idata[tid + 128];
    }
    __syncthreads();
    if(BlockSize >= 128 && tid < 64)
    {
        idata[tid] *= idata[tid + 64];
    }
    __syncthreads();
    // Last warp: volatile accesses, no barriers (implicit warp synchrony).
    if(tid < 32)
    {
        volatile int *vmem = idata;
        vmem[tid] *= vmem[tid + 32];
        vmem[tid] *= vmem[tid + 16];
        vmem[tid] *= vmem[tid + 8];
        vmem[tid] *= vmem[tid + 4];
        vmem[tid] *= vmem[tid + 2];
        vmem[tid] *= vmem[tid + 1];
    }
    // Thread 0 publishes the block's partial result.
    if (tid == 0) {b[blockIdx.x] = idata[0];}
}

int main(int argc,char*argv[])
{
    int SIZE = 512;  // threads per block
    int N = 1<<20;   // element count (1M)
    dim3 block(SIZE,1);
    dim3 grid((block.x + N - 1)/block.x, 1);
    // NOTE(review): int a[N] is a ~4 MB runtime-sized stack array — a g++
    // extension in C++ and a likely stack overflow under default limits.
    int a[N];
    // Fill helper: sets every element to 1.
    auto init = [&](auto* a,unsigned int size) -> void{
        for(int i = 0;i < size;i++)
        {
            a[i] = 1;
        }
    };
    int *a_dev, *ans_dev,ans[grid.x];
    init(a, N);
    cudaMalloc((int**)(&a_dev),sizeof(int)*N);
    cudaMalloc((int**)(&ans_dev),sizeof(int)*grid.x);
    cudaMemcpy(a_dev, a, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    switch(SIZE){
    case 512:
        clock_t start,end;
        start = clock();
        // grid.x/8 blocks because each block consumes 8*blockDim.x elements.
        add<512><<<grid.x/8, block>>>(a_dev, ans_dev, N);
        cudaDeviceSynchronize();
        end = clock();
        // NOTE(review): clock() ticks are not milliseconds; the label is off.
        cout<<"GPU time : "<<end - start<<"ms"<<endl;
        break;
    }
    // CPU reference computes a SUM while the kernel computes PRODUCTS — the
    // two totals are not comparable even for the all-ones input used here.
    clock_t start,end;
    start = clock();
    for(int i = 1;i < N;i++)
    {
        a[0] += a[i];
    }
    cout<<a[0]<<endl;
    end = clock();
    cout<<"CPU time : "<<end - start<<"ms"<<endl;
    cudaMemcpy (&ans, ans_dev, sizeof(int)*grid.x, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    int ret = 0;
    // NOTE(review): only grid.x/8 blocks were launched, yet all grid.x entries
    // of ans are summed — the tail entries are uninitialized device memory.
    for (int i = 0;i < grid.x;i++)
    {
        ret += ans[i];
    }
    cout<<ret<<endl;
    return 0;
}
11,995
// Out-of-line device helper: for i < N returns l[i] + r[i] + (l[0] + r[0]).
// The recursive call passes i = N, which always takes the else branch, so the
// recursion depth is exactly one.
// NOTE(review): the noinline attribute plus the degenerate recursion suggests
// this is a compiler/call-stack exercise rather than a plain add — confirm.
__device__ int __attribute__ ((noinline)) add(int *l, int *r, int i, int N)
{
    if (i < N) {
        return l[i] + r[i] + add(l, r, N, N);
    } else {
        return l[0] + r[0];
    }
}

// Kernel: p[idx] = add(l, r, idx, N) for each idx < N, recomputed `iter`
// times (every iteration produces the same value — presumably a timing or
// stress loop; confirm against the host launcher).
extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N, size_t iter)
{
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    for (size_t i = 0; i < iter; ++i) {
        if (idx < N) {
            p[idx] = add(l, r, idx, N);
        }
    }
}
11,996
#include "includes.h"

// Maps a flat buffer of uniform random draws `r` onto four per-walker
// quantities for one (step ist, sub-step isb) slice. The buffer is laid out
// as 4 consecutive groups of nwl draws per slice, with the slice base at
// isb*4*nwl + ist*4*2*nwl.
//   zr[i]  - proposal scale via inverse-CDF transform over [1/ACONST, ACONST]
//            (presumably an ensemble-sampler stretch move — confirm)
//   kr[i]  - partner-walker index in [0, nwl-1]
//   ru[i]  - uniform accept/reject variate
//   kex[i] - integer category in [0, 4]
// FIX: all literals now carry the f suffix — the original's bare `1.`, `2.`
// and `0.999999` promoted every expression to double inside a float kernel.
__global__ void mapRandomNumbers ( const int nwl, const int ist, const int isb, const float *r, float *zr, int *kr, float *ru, int *kex )
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if ( i < nwl ) {
        // Loop-invariant base offset of this slice (hoisted from each draw).
        const int base = isb * 4 * nwl + ist * 4 * 2 * nwl;
        // Draw 0: scale factor via z = ((A-1)u + 1)^2 / A.
        zr[i] = 1.0f / ACONST * powf ( r[i + 0 * nwl + base] * ( ACONST - 1.0f ) + 1.0f, 2.0f );
        // Draw 1: partner index; the +0.999999f makes nwl-1 reachable.
        kr[i] = ( int ) truncf ( r[i + 1 * nwl + base] * ( nwl - 1 + 0.999999f ) );
        // Draw 2: raw uniform kept as-is.
        ru[i] = r[i + 2 * nwl + base];
        // Draw 3: category in [0, 4], same reachable-endpoint trick.
        kex[i] = ( int ) truncf ( r[i + 3 * nwl + base] * ( 5 - 1 + 0.999999f ) );
    }
}
11,997
#include "stdio.h"

// Demonstrates cudaMemset: zero a device buffer, copy it back, print it.
int main(){
    const int n = 16;

    // host and device buffers
    int *host_buf = (int*)malloc(n * sizeof(int));
    int *dev_buf;
    cudaMalloc((void**)&dev_buf, n * sizeof(int));

    // zero the device buffer, then mirror it onto the host
    cudaMemset(dev_buf, 0, n * sizeof(int));
    cudaMemcpy(host_buf, dev_buf, n * sizeof(int), cudaMemcpyDeviceToHost);

    // print the (all-zero) host copy
    for (int i = 0; i < n; i++){
        printf("%d ", host_buf[i]);
    }
    printf("\n");

    // release both buffers
    free(host_buf);
    cudaFree(dev_buf);
    return 0;
}
11,998
#include "includes.h"

// Empty no-op kernel: launches and returns immediately.
// NOTE(review): purpose inferred from the name only — typically used to warm
// up the CUDA context or measure bare launch overhead; confirm with callers.
__global__ void dummyKernel()
{
}
11,999
/* CUDA exercise: given a vector V of N integers, multiply each element by a
   constant C, in two variants:
   a. C and N passed as kernel parameters.
   b. C and N stored in the GPU's constant memory.
   NOTE(review): the kernel below actually SUMS the array into *result via
   atomicAdd rather than multiplying by a constant; the name `multiply` is
   kept for interface compatibility. */
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

//M and N number of threads (grid and block): grid is M*M blocks of N*N threads
#define M 1
#define N 2

// Accumulates array[0..dim) into *result with atomicAdd.
// Partitioning: with at least as many threads as elements, one element per
// thread; otherwise each thread sweeps a contiguous chunk of dim/thread_number
// elements and the last thread also takes the remainder.
__global__ void multiply( const int array[] , int dim,int result[], const int thread_number)
{
    // Flatten the 3-D thread coordinates of a 1-D grid into a global index.
    int index = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
              + threadIdx.z * blockDim.y * blockDim.x
              + threadIdx.y * blockDim.x
              + threadIdx.x;
    if (index < dim) {
        if (dim <= thread_number) {
            // More threads than elements: one element per thread.
            // FIX: dropped a stray third printf argument with no conversion.
            printf("Thread %i; Adding value of index %i\n", index, index);
            atomicAdd(result, array[index]);
        } else {
            int chunk = dim / thread_number;
            if (index != thread_number - 1) {
                // Interior threads each cover exactly `chunk` elements.
                for (int i = index * chunk; i < index * chunk + chunk; i++) {
                    printf("Thread %i; Adding value of index %i\n", index, i);
                    atomicAdd(result, array[i]);
                }
            } else {
                // Last thread sweeps its chunk plus any remainder.
                for (int i = index * chunk; i < dim; i++) {
                    atomicAdd(result, array[i]);
                }
            }
        }
    }
}

int main(int argc, char *argv[]){
    // Wall-clock measurement of the whole run.
    clock_t time_begin = clock();

    // pointers to host & device arrays
    int *device_array = 0;
    int *host_array = 0;
    int size_array = 9;
    int *d_sum = NULL;
    int *h_sum = (int*)malloc(sizeof(int));
    h_sum[0] = 0;

    // Fill the host array with random digits and echo it.
    host_array = (int*)malloc(size_array * sizeof(int));
    for (int i = 0; i < size_array; i++){
        host_array[i] = rand() % 10;
        printf("%i\t", host_array[i]);
    }
    printf("\n");
    printf("Sum of array: %i\n", h_sum[0]);

    // Device allocations; copying h_sum seeds the reduction accumulator to 0.
    cudaMalloc(&device_array, size_array * sizeof(int));
    cudaMalloc(&d_sum, sizeof(int));
    cudaMemcpy(device_array, host_array, sizeof(int) * size_array, cudaMemcpyHostToDevice);
    cudaMemcpy(d_sum, h_sum, sizeof(int), cudaMemcpyHostToDevice);

    dim3 bloque(N, N); // two-dimensional block of N*N threads
    dim3 grid(M, M);   // two-dimensional grid of M*M blocks
    int thread_number = N * N * M * M;

    multiply<<<grid, bloque>>>(device_array, size_array, d_sum, thread_number);
    // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();

    // download and inspect the result on the host:
    cudaMemcpy(h_sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Sum of array: %i\n", h_sum[0]);

    // deallocate memory
    free(host_array); free(h_sum);
    cudaFree(device_array);
    cudaFree(d_sum);

    printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    return 0;
}
12,000
#include <iostream> #include <cassert> using namespace std; class A { public: int *x; public: A() { this->x = new int(5); cout << &(this->x) << " : " << this->x << " : " << *(this->x) << endl; } ~A() { delete this->x; } }; int* x() { // int *x = (int*)malloc(sizeof(int)); int *x = new int; return x; }; int main() { // int *y = x(); // *y = 1; // cout << &y << " : " << y << " : " << *y << endl; // delete (y); // // assert( *y != NULL); // cout << &y << " : " << y << " : " << *y << endl; // { A* a = new A; cout << &(a->x) << " : " << a->x << " : " << *(a->x) << endl; cout << sizeof(float) ; // } // cout << &(a->x) << " : " << a->x << " : " << *(a->x) << endl; delete a; return 0; }