serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
21,801 | #include <cstdio>
int main() {
    // Print the demo banner; the explicit return makes the implicit
    // success status of the original visible.
    printf("Several Days of Cuda\n");
    return 0;
}
|
21,802 | #include "includes.h"
// Scalar addition on the device: *sum = *a + *b.
// Intended for a <<<1,1>>> launch; with more threads every thread would
// perform the same redundant store.
__global__ void add(int *a, int *b, int *sum)
{
*sum = *a + *b;
} |
21,803 | #include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include <algorithm>
__inline__
// Append `element` to `array`, using an atomic counter so that concurrent
// threads receive distinct slots. Returns the slot index that was used.
__device__ int push(int* array, int* num, const int& element)
{
int oldvalue = atomicAdd(num, 1);
array[oldvalue] = element;
// Fix: the function is declared `int` but previously ended without a
// return statement (undefined behaviour); return the claimed slot.
return oldvalue;
}
// Block-local stream compaction: one thread per element of a (single block,
// up to 1024 threads). Indices i where a[i] == 3 are gathered into
// results[0..count) and the count is stored in *N. Order of the gathered
// indices is arbitrary (atomic slot assignment), so callers sort first.
__global__ void Find3(int* a, int* results, int* N)
{
__shared__ int s_threes[1024];
__shared__ int threes_num;
int index = threadIdx.x;
if (threadIdx.x ==0 )
threes_num = 0;
__syncthreads();
if(a[index] ==3)
push(s_threes, &threes_num, index);
__syncthreads();
if(threadIdx.x < threes_num)
results[index] = s_threes[index];
// Fix: publish the count from a single thread; previously all 1024
// threads issued the same redundant global store.
if (threadIdx.x == 0)
*N = threes_num;
}
int main ()
{
    const int N = 1024;
    // Host input holds the repeating pattern 0,1,2,3, so every fourth
    // element equals 3; expected_ids records where on the CPU side.
    int* host_in = (int*)malloc(N * sizeof(int));
    std::vector<int> expected_ids;
    for (int i = 0; i < N; ++i) {
        host_in[i] = i % 4;
        if (host_in[i] == 3)
            expected_ids.push_back(i);
    }
    // Device buffers: input, compacted hit indices, and the hit count.
    int *dev_in, *dev_hits, *dev_count;
    cudaMalloc((void**)&dev_in, N * sizeof(int));
    cudaMalloc((void**)&dev_hits, N * sizeof(int));
    cudaMalloc((void**)&dev_count, sizeof(int));
    cudaMemcpy(dev_in, host_in, sizeof(int) * N, cudaMemcpyHostToDevice);
    Find3<<<1, 1024>>> (dev_in, dev_hits, dev_count);
    // Blocking copies below also synchronize with the kernel.
    int host_hits[1024];
    int hit_count;
    cudaMemcpy(host_hits, dev_hits, sizeof(int) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy(&hit_count, dev_count, sizeof(int), cudaMemcpyDeviceToHost);
    // Sort both sides (the GPU compaction order is arbitrary) and print
    // them column by column for visual comparison.
    std::sort(expected_ids.begin(), expected_ids.end());
    std::vector<int> gpu_ids(host_hits, host_hits + hit_count);
    std::sort(gpu_ids.begin(), gpu_ids.end());
    for (int i = 0; i < hit_count; ++i)
        std::cout << gpu_ids[i] << "\t" << expected_ids[i] << std::endl;
    cudaFree(dev_in);
    cudaFree(dev_hits);
    cudaFree(dev_count);
    free(host_in);
}
|
21,804 | __global__ void test(float *A){
// NOTE(review): each thread copies its right neighbour's value into its own
// slot, five times. There is no synchronization, so threads race with each
// other and the result depends on scheduling; the loop is idempotent per
// thread. Thread blockDim.x-1 reads A[i+1] one element past the block's
// range -- confirm the buffer is at least blockDim.x+1 floats.
int i = threadIdx.x;
for(int j = 0; j < 5; j++){
A[i] = A[i+1];
}
}
|
21,805 | __device__ void body_body_interaction(float4 point1, float4 point2, float3 *acceleration) {
// Accumulate point2's inverse-cube attraction on point1 into *acceleration
// (classic n-body force term): acc += (p2 - p1) * p2.w / dist^3.
// point2.w carries the mass/weight of the source body.
float4 difference;
difference.x = point2.x - point1.x;
difference.y = point2.y - point1.y;
difference.z = point2.z - point1.z;
difference.w = 1.0f;
// The 1e-10 softening keeps the self-pair (zero distance) finite.
// NOTE(review): 1e-10 is a double literal, promoting this sum to double
// precision; 1e-10f would keep the arithmetic in float.
float distSqr = difference.x * difference.x + difference.y * difference.y + difference.z * difference.z + 1e-10;
float distSixth = distSqr * distSqr * distSqr;
float invDistCube = 1.0f / sqrtf(distSixth);
float s = point2.w * invDistCube;
acceleration->x += difference.x * s;
acceleration->y += difference.y * s;
acceleration->z += difference.z * s;
}
// One thread per body: fold the interaction with every body (including
// itself -- the softening term in body_body_interaction keeps the self-pair
// finite) on top of the acceleration already stored for this body.
extern "C" __global__ void calculate_forces(const float4* positions, float3* accelerations, int num_points) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < num_points) {
        const float4 me = positions[idx];
        float3 total = accelerations[idx];
        for (int other = 0; other < num_points; ++other) {
            body_body_interaction(me, positions[other], &total);
        }
        accelerations[idx] = total;
    }
}
|
21,806 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 1024
// Three-point stencil over the interior of an N-element array.
// NOTE(review): the three neighbours are MULTIPLIED (not summed) before the
// 1/3 weight is applied -- confirm a product stencil is really intended.
__global__ void stencil(float *d_a, float *d_b) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= 1 && i <= N - 2) {
        d_b[i] = 0.3333f * d_a[i - 1] * d_a[i] * d_a[i + 1];
    }
}
// Driver: run N ping-pong sweeps of the stencil over an N-element rod with
// fixed 150-degree boundaries and a 70-degree interior, then print the result.
int main() {
float *h_a, *h_b;
float *d_a, *d_b;
int memSize = sizeof(float) * N;
//Reserve host memory
h_a = (float *) malloc(memSize);
h_b = (float *) malloc(memSize);
//Reserves device memory (error messages are in Spanish, kept verbatim)
cudaError_t error;
error = cudaMalloc((void **) &d_a, memSize);
if (error != cudaSuccess) {
fprintf(stderr, "Error al reservar memoria en la GPU\n");
return -1;
}
error = cudaMalloc((void **) &d_b, memSize);
if (error != cudaSuccess) {
fprintf(stderr, "Error al reservar memoria en la GPU\n");
return -1;
}
//Fills the arrays: interior at 70, both ends pinned at 150
for (int i = 0; i < N; ++i) {
h_a[i] = h_b[i] = 70.0f;
}
h_a[0] = h_a[N - 1] = h_b[0] = h_b[N - 1] = 150.0f;
//Copies host memory to device
error = cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
fprintf(stderr, "Error al transferir información\n");
return -1;
}
error = cudaMemcpy(d_b, h_b, memSize, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
fprintf(stderr, "Error al transferir información\n");
return -1;
}
//Grid Definition: N/256 blocks of 256 threads covers all N elements
dim3 block(N / 256);
dim3 thread(256);
// N sweeps, swapping the input/output pointers after each launch so the
// latest output is always in d_a. Launches are asynchronous and their
// errors are never inspected; the blocking cudaMemcpy below is the only
// synchronization point.
float *aux = NULL;
for (int i = 0; i < N; ++i) {
stencil<<<block ,thread>>>(d_a, d_b);
aux = d_a;
d_a = d_b;
d_b = aux;
}
// After the final swap d_a names the most recent output buffer.
error = cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
fprintf(stderr, "Error al transferir información\n");
return -1;
}
for (int i = 0; i < N; ++i) {
printf("%f, ", h_a[i]);
}
printf("\n");
free(h_a);
free(h_b);
cudaFree(d_a);
cudaFree(d_b);
return 0;
} |
21,807 | /*
Implementing inclusive Hillis & Steele plus scan in CUDA.
*/
#include <stdio.h>
#define NUM_THREADS 16
// Inclusive prefix sum: out_array[i] = in_array[0] + ... + in_array[i].
// Host-side reference used to validate the GPU scan.
// A running accumulator makes this O(n); the original recomputed each
// prefix from scratch, which was O(n^2) for no benefit.
void serial_scan(unsigned int* in_array, unsigned int* out_array, const unsigned int size){
    unsigned int running = 0;
    for(unsigned int i = 0; i < size; i++){
        running += in_array[i];
        out_array[i] = running;
    }
}
// In-place inclusive Hillis & Steele scan over a single block.
// Fix: the original executed `d_in[idx] += d_in[idx - i]` with only one
// barrier per step, so a thread could read its left partner while that
// partner was being updated in the same step (a data race). Stage the
// partner value in a register, barrier, then write, then barrier again.
__global__ void scan(unsigned int* d_in){
    const unsigned int idx = threadIdx.x;
    const unsigned int num_threads = blockDim.x;
    for(unsigned int stride = 1; stride < num_threads; stride *= 2){
        unsigned int partner = 0;
        if(idx >= stride){
            partner = d_in[idx - stride];
        }
        __syncthreads();   // all reads complete before any write
        if(idx >= stride){
            d_in[idx] += partner;
        }
        __syncthreads();   // all writes complete before the next step's reads
    }
}
int main(){
    const unsigned int BYTES = NUM_THREADS * sizeof(int);
    // Input 1..NUM_THREADS, so the inclusive scan yields the triangular
    // numbers 1, 3, 6, 10, ...
    unsigned int h_in [NUM_THREADS];
    unsigned int h_out [NUM_THREADS];
    for(unsigned int i = 0; i < NUM_THREADS; i++)
        h_in[i] = i + 1;
    unsigned int *d_in, *d_out;
    cudaMalloc((void **) &d_in, BYTES);
    cudaMalloc((void **) &d_out, BYTES);
    cudaMemcpy(d_in, h_in, BYTES, cudaMemcpyHostToDevice);
    // The scan is in-place on d_in; d_out is allocated but unused.
    scan<<<1, NUM_THREADS>>>(d_in);
    cudaMemcpy(h_out, d_in, BYTES, cudaMemcpyDeviceToHost);
    // CPU reference for eyeball comparison.
    unsigned int true_output[NUM_THREADS];
    serial_scan(h_in, true_output, NUM_THREADS);
    printf("True: \n");
    for(unsigned int i = 0; i < NUM_THREADS; i++)
        printf("%d ", true_output[i]);
    printf("\n");
    printf("Output: \n");
    for(unsigned int i = 0; i < NUM_THREADS; i++)
        printf("%d ", h_out[i]);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
21,808 |
/*
* This file is developed by Xuanzhi LIU (Walker LAU).
*
* If you want to get the latest version of this project or met any problems,
* please go to <https://github.com/WalkerLau/GPU-CNN> ,
* I will try to help as much as I can.
*
* You can redistribute this source codes and/or modify it under the terms of the BSD 2-Clause License.
*
* Note: the above information must be kept whenever or wherever the codes are used.
*
*/
#include "math_functions.cuh"
#define CONV1 3*9*9*48 // number of elements in filters of CONV1 is 3*9*9*48
#define CONV2 48*3*3*128 // number of elements in filters of CONV2 is 48*3*3*128
#define CONV3 128*3*3*128 // number of elements in filters of CONV3 is 128*3*3*128
#define CONV4 128*3*3*256 // number of elements in filters of CONV4 is 128*3*3*256
#define CONV5 256*3*3*192 // number of elements in filters of CONV5 is 256*3*3*192
#define CONV6 192*3*3*192 // number of elements in filters of CONV6 is 192*3*3*192
#define CONV7 192*3*3*128 // number of elements in filters of CONV7 is 192*3*3*128
// Match conv_layers
// matrix_procuct的输入分别为:输入数据首地址A、权重数据首地址B、输出数据首地址C、ofmap平面元素数量n、
// output volume的channel数m、一个filter的元素数量k。。。
// Dispatch on m*k (the filter-volume element count) to the per-layer
// convolution geometry, then hand off to convolute().
// Inputs: A = input feature maps, B = filters, C = output, n = ofmap plane
// size (unused here), m = output channels, k = elements per filter.
// CONV1 is recognised but deliberately not executed, matching the original
// implementation where its convolute() call is commented out upstream.
__host__ void cuda_matrix_procuct(float* A, float* B, float* C, const int n,
    const int m, const int k) {
    struct ConvParams {
        int elems;    // m*k signature of the layer
        int stride, src_w, src_chn, fil_w, dst_w, dst_chn;
    };
    static const ConvParams table[] = {
        { CONV2, 1, 29,  48, 3, 27, 128 },
        { CONV3, 1, 29, 128, 3, 27, 128 },
        { CONV4, 1, 15, 128, 3, 13, 256 },
        { CONV5, 1, 15, 256, 3, 13, 192 },
        { CONV6, 1, 15, 192, 3, 13, 192 },
        { CONV7, 1, 15, 192, 3, 13, 128 },
    };
    const int volume = m * k;
    if (volume == CONV1) {
        // CONV1 (stride 4, 228x228x3 -> 55x55x48) is intentionally a no-op.
        return;
    }
    const int layers = sizeof(table) / sizeof(table[0]);
    for (int t = 0; t < layers; ++t) {
        if (volume == table[t].elems) {
            convolute(A, B, C,
                      table[t].stride, table[t].src_w, table[t].src_chn,
                      table[t].fil_w, table[t].dst_w, table[t].dst_chn);
            return;
        }
    }
    std::cout<<"ERROR! Can't match conv_layers!"<<std::endl;
}
// Host-side convolution driver. Launch geometry: one block per output
// channel (grid z), one thread per output pixel (dst_w x dst_h block).
// para_chn input channels are consumed per launch, accumulating partial
// sums into C -- hence C is zeroed first.
__host__ void convolute(float* A, float* B, float* C,
const int stride ,
const int src_w ,
const int src_chn ,
const int fil_w ,
const int dst_w ,
const int dst_chn){
    const int dst_h = dst_w;      // feature maps are square
    const int para_chn = 1;       // must match the shared-memory sizing in conv_grid
    const int block_num = dst_chn;
    dim3 grid(1, 1, block_num);
    dim3 block(dst_w, dst_h, 1);
    cudaMemset(C, 0, sizeof(float) * dst_chn * dst_h * dst_w);
    // Iterate input-channel lumps (and output lumps, a single one here),
    // each launch adding its partial sums into C.
    for (int ifm_lump = 0; ifm_lump < src_chn / para_chn; ++ifm_lump) {
        for (int ofm_lump = 0; ofm_lump < dst_chn / block_num; ++ofm_lump) {
            conv_grid<<<grid, block>>>(A, B, C, src_w, src_chn, fil_w, dst_w,
                                       ifm_lump, ofm_lump, block_num, stride, para_chn);
        }
    }
}
//get partial sum for block_num ofmaps
//get partial sum for block_num ofmaps.
// One block per output channel (blockIdx.z), one thread per output pixel.
// Threads cooperatively stage para_chn input planes into shared memory,
// then each thread convolves its receptive field with the block's filter.
__global__ void conv_grid(data_t* A, float* B, float*C,
const int src_w ,
const int src_chn ,
const int fil_w ,
const int dst_w ,
const int ifm_lump,
const int ofm_lump,
const int block_num,
const int stride,
const int para_chn
){
//con_layer params (square maps/filters)
const int src_h = src_w;
const int fil_h = fil_w;
const int dst_h = dst_w;
//grid index
int bz = blockIdx.z;
int ty = threadIdx.y; int tx = threadIdx.x;
int tid = ty*blockDim.x + tx;
//allocate shared memory & registers
__shared__ data_t ifmaps[1*29*29]; //[para_chn * src_h * src_w]
float filters[1*3*3]; //[para_chn * fil_h * fil_w]
float res = 0;
//load ifmaps cooperatively: each thread copies a strided subset
for(int i = 0; i*dst_h*dst_w < para_chn*src_h*src_w; ++i){
if(i*dst_h*dst_w + tid < para_chn*src_h*src_w){
ifmaps[i*dst_h*dst_w + tid] = A[ifm_lump*para_chn*src_h*src_w + i*dst_h*dst_w + tid];
}
}
// Fix: a barrier is required here. Each thread loads only part of the
// shared tile but the accumulation below reads elements loaded by OTHER
// threads; without __syncthreads() those reads race with the stores.
__syncthreads();
//load this block's filter slice into registers
for(int i = 0; i < para_chn*fil_h*fil_w; ++i){
filters[i] = B[(ofm_lump*block_num*src_chn + ifm_lump*para_chn + bz*src_chn)*fil_h*fil_w + i];
}
//calculate partial sum over the receptive field
for(int c = 0, k = 0; c < para_chn; ++c){
for(int h = 0; h < fil_h; ++h){
for(int w = 0; w < fil_w; ++w){
res += ifmaps[c*src_h*src_w + ty*stride*src_w + tx*stride + h*src_w + w] * filters[k];
++k;
}
}
}
//accumulate into C (zeroed by convolute before the first launch)
C[ofm_lump*block_num*dst_h*dst_w + bz*dst_h*dst_w + ty*dst_w + tx] += res;
}
//FC layer's blockDim
#define PARA 64
// Stage the input vector (A, vec_len), weight matrix (B, dst_chn rows of
// vec_len) and output (C, dst_chn) through device memory, run cuda_fc with
// one block per output neuron, and copy the result back.
__host__ void cuda_fc_wrapper(const float* A, const float* B, float* C,
const int vec_len,
const int dst_chn
){
    float *src = NULL;
    float *fil = NULL;
    float *dst = NULL;
    cudaMalloc((void **)&src, vec_len * sizeof(float));
    cudaMalloc((void **)&fil, vec_len * dst_chn * sizeof(float));
    cudaMalloc((void **)&dst, dst_chn * sizeof(float));
    cudaMemcpy(src, A, vec_len * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(fil, B, vec_len * dst_chn * sizeof(float), cudaMemcpyHostToDevice);
    const int para = PARA;  // block size; must match cuda_fc's shared arrays
    // Only the two known FC widths are supported.
    switch (dst_chn) {
    case 4096:
        cuda_fc<<<4096, para>>>(src, fil, dst, vec_len, dst_chn);
        break;
    case 2048:
        cuda_fc<<<2048, para>>>(src, fil, dst, vec_len, dst_chn);
        break;
    default:
        std::cout<<"ERROR! Cannot match FC layer!"<<std::endl;
    }
    cudaMemcpy(C, dst, dst_chn * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dst);
    cudaFree(fil);
    cudaFree(src);
}
// Fully-connected layer: block bx computes output neuron bx as the dot
// product of input vector A with row bx of weight matrix B. Threads
// cooperatively stage `para`-sized chunks into shared memory; thread 0
// alone accumulates the products into the shared scalar `res`.
// NOTE(review): if vec_len is not a multiple of blockDim.x the final chunk
// reads past the end of A and B -- confirm callers guarantee divisibility.
__global__ void cuda_fc(const float* A, const float* B, float* C,
const int vec_len,
const int dst_chn
){
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int para = blockDim.x; //src size
//allocate memory
__shared__ float src[PARA]; //max src size = [para]
__shared__ float fil[PARA]; //max fil size = [para]
//load & calculate
__shared__ float res;
if(0 == tx){res = 0.0;}
for(int i = 0; i*para < vec_len; ++i){
src[tx] = A[i*para + tx];
fil[tx] = B[bx*vec_len + i*para + tx];
__syncthreads();  // chunk fully staged before thread 0 consumes it
if(0 == tx){
for(int k = 0; k < para; ++k){
res += src[k] * fil[k];
}
}
__syncthreads();  // keep the next chunk's stores from racing these reads
}
//write C
if(0 == tx){
C[bx] = res;
}
} |
21,809 | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Intentionally empty kernel; serves as a minimal compile/launch smoke test.
__global__ void test()
{
}
// Empty C-linkage host entry point -- presumably exported so non-C++ code
// can link against this translation unit; confirm with callers.
extern "C"
void cutest()
{
} |
21,810 | __global__ void get_w_combo(float *a,float*b, float *w, const unsigned int r, const unsigned int Y ,const unsigned int c )
{
// Gradient-style update of the r x c matrix w. Each thread computes one
// element of the product a*b (a is r x Y, b is Y x c) and applies
//   w -= 1e-7 * ((a*b)/200 + 2*50000*w).
// NOTE(review): the constants read like a learning rate (1e-7), batch size
// (200) and an L2 weight-decay term (2*50000) -- confirm against the
// training code that produced them.
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if(row < r && col <c) {
float temp = 0;
for (int k = 0; k < Y; k++) {
temp+= a[row * Y + k] * b[k * c + col];
}
float temp1= w[row * c + col];
float temp3 = 2 * 50000 * temp1;
float temp4 = (temp/200) + temp3;
float temp5 = 0.0000001 * temp4;
w[row * c + col] = temp1 - temp5;
}
}
|
21,811 | #include <pthread.h>
#include <stdio.h>
#include <iostream>
//const int N = 1 << 20;
const int N = 10;
// Grid-stride loop: x[i] = sqrt(pi^i), computed in double precision (as in
// the original -- `sqrt`/`pow` on a double literal) and narrowed to float
// on store.
__global__ void kernel(float *x, int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
        x[i] = sqrt(pow(3.14159, i));
    }
}
// Grid-stride copy with a per-launch offset: x[i] = data[i] + incr.
// `incr` is the worker-thread id, so each stream's output is shifted by
// its owner's index.
__global__ void idxTest(float *x, float *data, int n, uint incr)
{
    const size_t step = blockDim.x * gridDim.x;
    for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step)
    {
        x[i] = data[i] + incr;
    }
}
/*int main()
{
const int num_streams = 8;
cudaStream_t streams[num_streams];
float *data[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreate(&streams[i]);
cudaMalloc(&data[i], N * sizeof(float));
// launch one worker kernel per stream
kernel<<<1, 64, 0, streams[i]>>>(data[i], N);
// launch a dummy kernel on the default stream
kernel<<<1, 1>>>(0, 0);
}
cudaDeviceReset();
return 0;
}*/
// Per-worker argument bundle handed to launch_kernel via pthread_create.
struct cuda_streams_arg {
cudaStream_t *stream;  // stream slot owned by main; created inside launch_kernel
float *data;           // device pointer to the shared input vector
uint threadId;         // worker index; used as the additive offset in idxTest
};
// pthread entry point: each worker creates its own stream, runs idxTest on
// the shared input with its threadId as the offset, and prints the result.
void *launch_kernel(void *args)
{
    cuda_streams_arg *thread_arg = static_cast<cuda_streams_arg *>(args);
    cudaStream_t *currentStream = thread_arg->stream;
    cudaStreamCreate(currentStream);
    float *data;
    cudaMalloc(&data, N * sizeof(float));
    idxTest<<<2, 4, 0, *currentStream>>>(data, thread_arg->data, N, thread_arg->threadId);
    // Fix: synchronize the stream the kernel was actually launched on; the
    // original called cudaStreamSynchronize(0), i.e. the default stream.
    cudaStreamSynchronize(*currentStream);
    float *host_data = (float *)(malloc(N * sizeof(float)));
    cudaMemcpy(host_data, data, N*sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; ++i)
        std::cout << host_data[i] << " ";
    std::cout << std::endl;
    // Fix: release the per-worker buffers (previously leaked).
    free(host_data);
    cudaFree(data);
    return NULL;
}
// Driver: upload a shared input vector, spawn one pthread per stream (each
// runs idxTest with its own offset), join the workers, and clean up.
int main()
{
    const int num_threads = 4;
    pthread_t threads[num_threads];
    cudaStream_t streams[num_threads];
    cuda_streams_arg stream_args[num_threads];
    // Shared input 0, 2, 4, ..., echoed so the offset outputs can be compared.
    float host_globalData[N];
    for (size_t idx = 0; idx < N; ++idx)
    {
        host_globalData[idx] = idx*2;
        std::cout << host_globalData[idx] << " ";
    }
    std::cout << std::endl;
    float *dev_globalData;
    cudaMalloc(&dev_globalData, N * sizeof(float));
    cudaMemcpy(dev_globalData, host_globalData, N * sizeof(float), cudaMemcpyHostToDevice);
    for (int i = 0; i < num_threads; i++) {
        stream_args[i].stream = &streams[i];
        stream_args[i].data = dev_globalData;
        stream_args[i].threadId = i;
        if (pthread_create(&threads[i], NULL, launch_kernel, &stream_args[i])) {
            // Fix: message was the garbled "Error creating threadn".
            fprintf(stderr, "Error creating thread\n");
            return 1;
        }
    }
    for (int i = 0; i < num_threads; i++) {
        if(pthread_join(threads[i], NULL)) {
            // Fix: message was the garbled "Error joining threadn".
            fprintf(stderr, "Error joining thread\n");
            return 2;
        }
    }
    cudaFree(dev_globalData);  // fix: previously leaked until device reset
    cudaDeviceReset();
    return 0;
}
|
21,812 | #include "includes.h"
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
// Thread block size
#define BLOCK_SIZE 16
/* Matrices */
float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
/* ------------------ Cuda Code --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* defined in the beginning of this code. B[][] is initialized to zeros.
*/
/* returns a seed for srand based on the time */
// Per-block reduction along the y-dimension of the thread block, writing
// sum/N per block into d_mean. Shared memory is dynamic (extern) and must
// be sized at launch for blockDim.y floats; sdata is indexed by threadIdx.y
// only, which implies blockDim.x == 1 -- confirm the launch configuration.
__global__ void matrixMean(float* d_in, float* d_mean, int N)
{
extern __shared__ float sdata[];
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int tid = threadIdx.y;
unsigned int i = idx_y * N + idx_x;
sdata[tid] = d_in[i];
__syncthreads();
// do reduction in shared mem (interleaved-addressing tree reduction)
// NOTE(review): the guard compares tid + s against N (the matrix size)
// rather than blockDim.y -- it looks like it was meant to bound the
// reduction by the block height; verify before reuse.
for(unsigned int s=1; s < blockDim.y; s *= 2)
{
if(tid +s < N)
{
if(tid % (2*s) == 0)
{
sdata[tid] += sdata[tid + s];
}
}
__syncthreads();
}
// write result for this block to global mem
if(tid == 0)
{
d_mean[blockIdx.x] = sdata[0]/(float) N;
}
} |
21,813 | #include "includes.h"
/*
* CCL3D.cu
*/
#define CCL_BLOCK_SIZE_X 8
#define CCL_BLOCK_SIZE_Y 8
#define CCL_BLOCK_SIZE_Z 8
__device__ int d_isNotDone;
// Path-flattening pass of 3-D connected-component labelling: each voxel
// follows its label chain to the representative (root) label and writes
// that root back, flattening the equivalence trees.
__global__ void analyseLabels(int* labels, int w, int h, int d) {
const int x = blockIdx.x * CCL_BLOCK_SIZE_X + threadIdx.x;
const int y = blockIdx.y * CCL_BLOCK_SIZE_Y + threadIdx.y;
const int z = blockIdx.z * CCL_BLOCK_SIZE_Z + threadIdx.z;
const int index = (z*h + y)*w + x;
if (x >= w || y >= h || z >= d) return;
int lcur = labels[index];
// Label 0 is treated as background and left untouched.
if (lcur) {
// Chase parent pointers two at a time until a fixed point is reached.
// NOTE(review): this terminates only if every chain ends in a
// self-labelled root (labels[r] == r); concurrent writers in other
// passes must preserve that invariant.
int r = labels[lcur];
while(r != lcur) {
lcur = labels[r];
r = labels[lcur];
}
labels[index] = lcur;
}
} |
21,814 | #include "includes.h"
// Per-pixel RGB -> Y/Cb/Cr conversion over n pixels stored as separate
// planes (imgr/imgg/imgb in, imgy/imgcb/imgcr out). The coefficients
// approximate the BT.601 YUV matrix; results are truncated to int on store.
__global__ void rgb2yuvKernel(int *imgr,int *imgg,int *imgb,int *imgy,int *imgcb,int *imgcr, int n) {
int r, g, b;
int y, cb, cr;
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
// Tail guard for the final partial block.
if (index < n){
r = imgr[index];
g = imgg[index];
b = imgb[index];
y = (int)( 0.299*r + 0.587*g + 0.114*b);
cb = (int)(-0.147*r - 0.289*g + 0.436*b);
cr = (int)( 0.615*r - 0.515*g - 0.1*b);
imgy[index] = y;
imgcb[index] = cb;
imgcr[index] = cr;
}
} |
21,815 | __global__ void local_averages_kernel(float * A, float * B, int size_B)
{
// Downsample by 4: B[i] is the mean of the four consecutive entries
// A[4i .. 4i+3], so A must hold at least 4 * size_B floats.
// NOTE(review): 0.0 and 4.0 are double literals, promoting the arithmetic
// to double; 0.0f/4.0f would keep it in single precision.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( index < size_B )
{
float temp = 0.0;
for ( int j = 0; j < 4; j++ )
{
temp = temp + A[(index * 4) + j];
}
B[index] = temp / 4.0;
}
} |
21,816 | #include <math.h>
// Sobel gradient for one interior pixel (i, j): magnitude |Gx| + |Gy|
// (thresholded, then scaled by 1/6) and a direction quantized to 0..3.
__global__ void calcGradientGPU(int *image, int *gradientMag, int *gradientDir, int width, int height, int threshold){
// Flat offsets of the 3x3 neighbourhood around a pixel in row-major order.
int mask[9] = { -width - 1, -width, -width + 1,
-1, 0, 1,
width -1, width, width + 1 };
int GxMask[9] = { -1, 0, 1,
-2, 0, 2,
-1, 0, 1 };
int GyMask[9] = { 1, 2, 1,
0, 0, 0,
-1, -2, -1 };
int i = blockIdx.y*blockDim.y + threadIdx.y;
int j = blockIdx.x*blockDim.x + threadIdx.x;
// Skip the one-pixel border; the 3x3 window would run off the image.
if ( i >= height-1 || j >= width-1) return;
if (i >0 && j > 0){
int a;
int Gx = 0;
int Gy = 0;
for(a=0; a < 9; a++){
Gx += GxMask[a]*image[i*width + j + mask[a]];
Gy += GyMask[a]*image[i*width + j + mask[a]];
}
// atan yields (-pi/2, pi/2]; shifting negatives by pi puts angle in [0, pi).
float angle = M_PI/2;
if (Gx != 0) angle = atan((float)Gy/(float)Gx);
if (angle < 0 ) angle += M_PI;
// NOTE(review): the 5/pi scale with a -0.1 offset and %4 buckets [0, pi)
// into directions 0..3 -- confirm the intended bucket boundaries.
gradientDir[i*width + j] = (int)(( angle)*((float)5/(float)M_PI) - 0.1)%4;
int mag = abs(Gx) + abs(Gy);
if (mag > threshold){
gradientMag[i*width + j] = mag/6;
}
else{
gradientMag[i*width + j] = 0;
}
}
}
|
21,817 | #include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <algorithm>
using namespace std;
// Atomic max for float via a CAS loop: the float bits are reinterpreted as
// int for atomicCAS, and the loop runs only while `val` still beats the
// currently stored value. Returns the last value observed at *addr.
__device__ float atomicMaxFloat(float* addr, float val) {
    int *slot = (int *) addr;
    int seen = *slot;
    while (val > __int_as_float(seen)) {
        seen = atomicCAS(slot, seen, __float_as_int(val));
    }
    return __int_as_float(seen);
}
// Please note that __syncthreads() can only synchronize threads in a
// particular block
// One thread per element: fold d_in[gid] into the running maximum at
// d_max[0] using the CAS-based float atomic (the commented-out plain
// compare-and-store below would race between threads).
// NOTE(review): no bounds guard -- the launch must supply exactly as many
// threads as d_in has elements, as main() does here.
__global__ void getMax(float *d_in, float *d_max) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
// if (d_in[gid] > d_max[0])
// d_max[0] = d_in[gid];
atomicMaxFloat(d_max, d_in[gid]);
}
int main() {
    srand(time(NULL));
    const int ARRAY_SIZE = 10;
    // Values 0.2, 1.2, ..., 9.2 in shuffled order; the max is always 9.2.
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
        h_in[i] = float(i)+0.2;
    // NOTE(review): std::random_shuffle was removed in C++17; std::shuffle
    // is the modern replacement if the toolchain is upgraded.
    random_shuffle(&h_in[0], &h_in[ARRAY_SIZE]);
    // One single-thread block per element.
    const dim3 blkDim(1, 1, 1);
    const dim3 grdDim(ARRAY_SIZE, 1, 1);
    const size_t fsize = sizeof(float);
    float *d_in;
    cudaMalloc(&d_in, fsize*ARRAY_SIZE);
    cudaMemcpy(d_in, h_in, fsize*ARRAY_SIZE, cudaMemcpyHostToDevice);
    // Seed the running maximum with the first element of the shuffled input.
    float *d_max;
    cudaMalloc(&d_max, fsize);
    cudaMemcpy(d_max, h_in, fsize, cudaMemcpyHostToDevice);
    getMax<<<grdDim, blkDim>>>(d_in, d_max);
    float h_max[1];
    cudaMemcpy(h_max, d_max, fsize, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_max);
    cout << h_max[0] << endl;
}
|
21,818 | #include <stdio.h>
#define N 16
#define BLOCK_SIZE 4
// Tiled matrix transpose through shared memory. The +1 column pad is the
// standard trick to avoid shared-memory bank conflicts on the swapped read.
__global__ void transpose(int *input,int *output){
__shared__ int sharedMemory[BLOCK_SIZE][BLOCK_SIZE + 1];
//global index (note: x is used as the ROW here, since index = x*N + y)
int indexX = threadIdx.x + blockIdx.x*blockDim.x;
int indexY = threadIdx.y + blockIdx.y*blockDim.y;
//transposed index: block coordinates swapped, thread offsets kept
int tindexX = threadIdx.x + blockIdx.y*blockDim.x;
int tindexY = threadIdx.y + blockIdx.x*blockDim.y;
//local index
int localIndexX = threadIdx.x;
int localIndexY = threadIdx.y;
int index = indexX*N + indexY;
int transposedIndex = tindexY*N + tindexX;
// Stage the tile, barrier, then read back with swapped local indices.
sharedMemory[localIndexX][localIndexY] = input[index];
__syncthreads();
output[transposedIndex] = sharedMemory[localIndexY][localIndexX];
}
// Fill the N*N matrix with its own linear index: 0, 1, 2, ...
void fill_data(int *data){
    for(int idx = 0; idx < N * N; ++idx){
        data[idx] = idx;
    }
}
// Print an n x n matrix, four characters per entry, one row per line.
void print_matrix(int *data,int n){
    for(int row = 0; row < n; row++){
        for(int col = 0; col < n; col++)
            printf("%4d ", data[row*n + col]);
        printf("\n");
    }
}
// Driver: fill an N x N matrix, transpose it on the GPU, print both.
int main(void){
    int *a,*b;
    int *d_a,*d_b;
    int size = N*N*sizeof(int);
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    fill_data(a);
    cudaMalloc((void**)&d_a,size);
    cudaMalloc((void**)&d_b,size);
    cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
    dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE,1);
    dim3 gridSize(N/BLOCK_SIZE,N/BLOCK_SIZE,1);
    // Fix: launch syntax is <<<grid, block>>>; the original passed the block
    // dimensions first. With N=16 and BLOCK_SIZE=4 both happen to be 4x4x1,
    // which masked the bug -- any other N/BLOCK_SIZE would misbehave.
    transpose<<<gridSize,blockSize>>>(d_a,d_b);
    cudaMemcpy(b,d_b,size,cudaMemcpyDeviceToHost);
    printf("Original:\n");
    print_matrix(a,N);
    printf("Transposed:\n");
    print_matrix(b,N);
    free(a);
    free(b);
    cudaFree(d_a);
    cudaFree(d_b);
}
|
21,819 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#define ROUNDS 1000000
#define BLOCKS 512
#define GRIDS 1
// Draw a pseudo-random double from [a, b): rand()'s [0, 1) fraction is
// scaled into the requested interval.
double uniform(double a, double b){
    const double fraction = rand() / (RAND_MAX + 1.0);
    return fraction * (b - a) + a;
}
// One pi estimate per thread: sample ROUNDS points in the unit square and
// count those inside the quarter circle; pi[t_rank] = 4 * hits / ROUNDS.
// The fixed seed (1234) with the thread rank as the sequence number makes
// runs reproducible.
__global__ void gpu_monte_carlo(float *pi, curandState *states) {
unsigned int t_rank = threadIdx.x + blockDim.x * blockIdx.x;
long in_circle = 0;
float x, y;
curand_init(1234, t_rank, 0, &states[t_rank]); // Initialize CURAND
for(long i = 0; i < ROUNDS; i++) {
x = curand_uniform (&states[t_rank]);
y = curand_uniform (&states[t_rank]);
in_circle += x*x + y*y <= 1.0f ? 1 : 0; // count if x & y is in the circle.
}
pi[t_rank] = (float) in_circle / ROUNDS * 4.0; // return estimate of pi
}
// CPU Monte-Carlo pi: sample n points uniformly from the square [-r, r]^2
// and return 4 * (fraction inside the inscribed circle of radius r).
double cpu_monte_carlo(long n) {
double x, y;
// Fix: in_circle was read (incremented) without ever being initialized,
// which is undefined behaviour and could skew or garble the estimate.
long in_circle = 0;
double r = 5.0;
float a = -r,b = r;
for(long i = 0; i < n; i++) {
x = uniform(a,b);
y = uniform(a,b);
in_circle += x*x + y*y <= r*r ? 1 : 0;
}
return (double) in_circle / n * 4;
}
// Compare CPU and GPU Monte-Carlo pi estimates and their wall times.
int main (int argc, char *argv[]) {
clock_t startgpu, stopgpu,startcpu,stopcpu;
float host[BLOCKS * GRIDS];
float *dev;
curandState *devStates;
// CPU baseline over the same total number of samples.
startcpu = clock();
float pi_cpu = cpu_monte_carlo(BLOCKS * GRIDS * ROUNDS);
stopcpu = clock();
printf("Pi = %f CPU pi calculated in %f s.\n", pi_cpu,(stopcpu-startcpu)/(float)CLOCKS_PER_SEC);
// GPU: each thread produces an independent estimate; average them.
startgpu = clock();
cudaMalloc((void **) &dev, BLOCKS * GRIDS * sizeof(float)); // allocate device mem. for counts
cudaMalloc( (void **)&devStates, GRIDS * BLOCKS * sizeof(curandState) );
gpu_monte_carlo<<<BLOCKS, GRIDS>>>(dev,devStates);
cudaMemcpy(host, dev, BLOCKS * GRIDS * sizeof(float), cudaMemcpyDeviceToHost); // return results
// Fix: the accumulator was declared without an initializer and then
// incremented -- undefined behaviour producing a garbage average.
float pi_gpu = 0.0f;
for(int i = 0; i < BLOCKS * GRIDS; i++) {
pi_gpu += host[i];
}
pi_gpu /= (BLOCKS * GRIDS);
stopgpu = clock();
printf("Pi = %f GPU pi calculated in %f s.\n", pi_gpu,(stopgpu-startgpu)/(float)CLOCKS_PER_SEC);
// Fix: release the device allocations (previously leaked).
cudaFree(dev);
cudaFree(devStates);
return 0;
} |
21,820 | #include <stdlib.h>
#include <stdio.h>
// Set every entry of the m x n row-major matrix `mat` to `value`.
void init_matrix(int m, int n, double *mat, double value)
{
    int i, j;
    for (i = 0; i < m; i++)
        for (j = 0; j < n; j++)
            // Fix: a row of an m x n row-major matrix has stride n (the
            // column count); the original used i*m + j, which skips/repeats
            // cells whenever m != n and can write past the m*n buffer.
            mat[i * n + j] = value;
}
// Fill all m entries of vector v with `value`.
void init_vector(int m, double *v, double value)
{
    for (int i = 0; i < m; i++)
        v[i] = value;
}
// Allocate an m x n matrix as one contiguous row-major block.
// Returns NULL for non-positive dimensions or on allocation failure.
double *malloc_2d(int m, int n)
{
    if (m <= 0 || n <= 0)
        return NULL;
    return (double *)malloc(m * n * sizeof(double));
}
// Print the m x n row-major matrix with '|' borders on each row,
// two decimals per entry.
void print_matrix(int m, int n, double *mat)
{
    int i, j;
    for (i = 0; i < m; i++)
    {
        for (j = 0; j < n; j++)
        {
            if (j == 0)
                printf("|");
            // Fix: row stride of an m x n row-major matrix is n (columns);
            // the original read mat[i*m + j], printing the wrong cells (and
            // reading out of bounds) whenever m != n. This matches the same
            // fix applied to init_matrix in this file.
            printf(" %.2f ", mat[i * n + j]);
            if (j == n - 1)
                printf("|");
        }
        printf("\n");
    }
    printf("\n\n");
}
// Print the m entries of v, one per line with three decimals, followed by
// two blank lines.
void print_vector(int m, double* v)
{
int i;
for (i = 0; i < m; i++)
printf("%.3f\n", v[i]);
printf("\n\n");
} |
21,821 | #include "includes.h"
# define MAX(a, b) ((a) > (b) ? (a) : (b))
# define GAUSSIAN_KERNEL_SIZE 3
# define SOBEL_KERNEL_SIZE 5
# define TILE_WIDTH 32
# define SMEM_SIZE 128
// Canny hysteresis step: start from the strong-edge map (d_highThreshHyst)
// and promote any of the 8 neighbours of a strong pixel whose suppressed
// gradient (d_nonMax) exceeds lowThreshold.
__global__ void lowHysterisis(int width, int height, float *d_nonMax, float* d_highThreshHyst, float lowThreshold, float *d_lowThreshHyst) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
// Interior pixels only, so all 8 neighbour indices stay in bounds.
if ((ix > 0) && (ix < (width - 1)) && (iy > 0) && iy < (height - 1)) {
int tid = iy * width + ix;
// Seed the output with this pixel's strong-edge value.
// NOTE(review): neighbouring threads may also write this same output
// index via the promotions below, so the final value can depend on
// thread scheduling -- verify whether a promotion is guaranteed to win
// or whether a second pass/atomics are needed.
d_lowThreshHyst[tid] = d_highThreshHyst[tid];
if (d_highThreshHyst[tid] == 1) {
// Determine neighbour indices
int eastN = tid + 1;
int westN = tid - 1;
int northN = tid - width;
int southN = tid + width;
int southEastN = southN + 1;
int northEastN = northN + 1;
int southWestN = southN - 1;
int northWestN = northN - 1;
if (d_nonMax[eastN] > lowThreshold)
d_lowThreshHyst[eastN] = 1.0f;
if (d_nonMax[westN] > lowThreshold)
d_lowThreshHyst[westN] = 1.0f;
if (d_nonMax[northN] > lowThreshold)
d_lowThreshHyst[northN] = 1.0f;
if (d_nonMax[southN] > lowThreshold)
d_lowThreshHyst[southN] = 1.0f;
if (d_nonMax[southEastN] > lowThreshold)
d_lowThreshHyst[southEastN] = 1.0f;
if (d_nonMax[northEastN] > lowThreshold)
d_lowThreshHyst[northEastN] = 1.0f;
if (d_nonMax[southWestN] > lowThreshold)
d_lowThreshHyst[southWestN] = 1.0f;
if (d_nonMax[northWestN] > lowThreshold)
d_lowThreshHyst[northWestN] = 1.0f;
}
}
} |
21,822 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 10000
// Monte-Carlo estimate of pi: count the fraction of N random points in the
// unit square that fall inside the quarter circle, times 4. Also reports
// the elapsed CPU time.
int main()
{
int sum = 0;
double x, y;
double start, end;
start = clock();
for (int i = 0; i < N; i++)
{
x = (double) rand() / RAND_MAX;
y = (double) rand() / RAND_MAX;
if(x*x + y*y < 1)
sum++;
}
end = clock();
// Fix: the estimator divides the hit count by the sample count N; the
// original divided by N - 1, biasing the estimate slightly high.
printf("PI = %f\n", (double) 4 * sum / N);
printf("Cost time %lf sec. \n", (double)(end - start)/CLOCKS_PER_SEC);
return 0;
} |
21,823 | #include <stdio.h>
// Device-side printf demo: each block reports its id and the launch
// geometry (threads per block, total blocks).
__global__ void hello(){
printf("Hey there! from block %d, (Threads in block: %d, Blocks: %d)\n",
blockIdx.x, blockDim.x, gridDim.x);
}
int main(int argc, char ** argv) {
// launch the kernel with 16 blocks of 1 thread each
hello<<<16, 1>>>();
// force the device-side printf buffers to flush before the host prints
cudaDeviceSynchronize();
printf("That's it\n");
return 0;
} |
21,824 | #include "includes.h"
// 1-D box stencil: out[g] = sum of in[g-RADIUS .. g+RADIUS]. RADIUS comes
// from includes.h and is not visible here.
// NOTE(review): threads near the array edges read in[globIdx +/- RADIUS]
// outside the array unless the caller pads the input by RADIUS on both
// sides -- confirm the allocation convention.
__global__ void stencil(int *in, int *out)
{
int globIdx = blockIdx.x * blockDim.x + threadIdx.x;
int value = 0;
for(int offset = -RADIUS; offset <= RADIUS; offset++)
value += in[globIdx + offset];
out[globIdx] = value;
} |
21,825 |
#include <stdio.h>
#define SIZE 2050
#define DIVUP(a,b) (a % b) == 0 ? (a / b) : (a / b) + 1
// Elementwise vector sum: Output[i] = Vector1[i] + Vector2[i], with a tail
// guard for the final partial block (SIZE is not a multiple of 64).
__global__ void VectorAddKernel(float * Vector1, float * Vector2, float * Output, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    Output[idx] = Vector1[idx] + Vector2[idx];
}
// Driver: add two SIZE-element vectors (both 0..SIZE-1, so Output[i]=2*i)
// on the GPU and print the result.
int main()
{
float HostVector1[SIZE];
float HostVector2[SIZE];
float HostOutputVector[SIZE];
for(int i=0;i<SIZE;i++)
{
HostVector1[i] = i;
HostVector2[i] = i;
}
float * GPUVector1;
float * GPUVector2;
float * GPUOutputVector;
cudaError_t err;
err = cudaMalloc((void**)&GPUVector1,SIZE*sizeof(float));
err = cudaMalloc((void**)&GPUVector2,SIZE*sizeof(float));
err = cudaMalloc((void**)&GPUOutputVector,SIZE*sizeof(float));
err = cudaMemcpy(GPUVector1,HostVector1,SIZE*sizeof(float),cudaMemcpyHostToDevice);
err = cudaMemcpy(GPUVector2,HostVector2,SIZE*sizeof(float),cudaMemcpyHostToDevice);
// DIVUP rounds the grid up so the 2050-element tail is still covered.
dim3 BlockDim(64,1,1);
dim3 GridDim(DIVUP(SIZE,BlockDim.x),1,1);
VectorAddKernel<<<GridDim,BlockDim>>>(GPUVector1,GPUVector2,GPUOutputVector,SIZE);
// Do other stuff...
// Fix: cudaThreadSynchronize() is long deprecated;
// cudaDeviceSynchronize() is the supported equivalent.
cudaDeviceSynchronize();
err = cudaMemcpy(HostOutputVector,GPUOutputVector,SIZE*sizeof(float),cudaMemcpyDeviceToHost);
// Fix: the original assigned every status to err but never inspected it;
// report at least the final D2H failure before printing garbage.
if (err != cudaSuccess)
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
err = cudaFree(GPUVector1);
err = cudaFree(GPUVector2);
err = cudaFree(GPUOutputVector);
for(int i=0;i<SIZE; i++)
printf("%8.3f\n",HostOutputVector[i]);
}
|
21,826 | /*
** Projeto de Algoritmos Paralelos
** Multiplicação de Matrizes
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_profiler_api.h>
#define TAM_BLOCO 16
// Tiled matrix multiply R = M * N with TAM_BLOCO x TAM_BLOCO shared-memory
// tiles; tamM is M's width, tamN is N's width. (Comments translated from
// Portuguese.)
__global__ void cuda_multiplicarmatriz(float* M, float* N, float* R, int tamM, int tamN) {
// block index
int bx = blockIdx.x;
int by = blockIdx.y;
// thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// index of the first sub-matrix of M processed by this block
int mComeco = tamM * TAM_BLOCO * by;
// index of the last sub-matrix of M processed by this block
int mFim = mComeco + tamM - 1;
// step size used to iterate through the sub-matrices of M
int mPasso = TAM_BLOCO;
// index of the first sub-matrix of N processed by this block
int nComeco = TAM_BLOCO * bx;
// step size used to iterate through the sub-matrices of N
int nPasso = TAM_BLOCO * tamN;
// the single output element computed by this thread
float rRes = 0;
// walk over all sub-matrices of M and N required to compute this
// block's sub-matrix of R
for (int m = mComeco, n = nComeco; m <= mFim; m += mPasso, n += nPasso) {
// shared memory for the sub-matrix of M
__shared__ float Msub[TAM_BLOCO][TAM_BLOCO];
// shared memory for the sub-matrix of N
__shared__ float Nsub[TAM_BLOCO][TAM_BLOCO];
// load the tiles from global to shared memory; each thread loads
// one element of each matrix
Msub[ty][tx] = M[m + tamM * ty + tx];
Nsub[ty][tx] = N[n + tamN * ty + tx];
// synchronize to make sure both tiles are fully loaded
__syncthreads();
// multiply the two tiles; each thread computes one element of the
// block's sub-matrix
for (int i = 0; i < TAM_BLOCO; ++i)
rRes += Msub[ty][i] * Nsub[i][tx];
// synchronize so the multiplication is finished before two new
// tiles are loaded in the next iteration
__syncthreads();
}
// write the block's sub-matrix to global memory; each thread writes a
// single element
int r = tamN * TAM_BLOCO * by + TAM_BLOCO * bx;
R[r + tamN * ty + tx] = rRes;
}
// Função para rodar na CPU
// Computa R = M * N
// aM é a altura de M
// lM é a largura de M
// lN é a largura de N
// Host wrapper: copy M (aM x lM) and N (lM x lN) to the GPU, run the tiled
// kernel, and copy R (aM x lN) back. All dimensions must be multiples of
// TAM_BLOCO (no tail handling in the kernel).
void multiplicar(const float* M, const float* N, float* R, int aM, int lM, int lN) {
    float *Md, *Nd, *Rd;
    const int bytesM = aM * lM * sizeof(float);
    cudaMalloc((void**)&Md, bytesM);
    cudaMemcpy(Md, M, bytesM, cudaMemcpyHostToDevice);
    const int bytesN = lM * lN * sizeof(float);
    cudaMalloc((void**)&Nd, bytesN);
    cudaMemcpy(Nd, N, bytesN, cudaMemcpyHostToDevice);
    const int bytesR = aM * lN * sizeof(float);
    cudaMalloc((void**)&Rd, bytesR);
    // One TAM_BLOCO x TAM_BLOCO thread tile per output tile.
    dim3 bloco(TAM_BLOCO, TAM_BLOCO);
    dim3 grade(lN / bloco.x, aM / bloco.y);
    cuda_multiplicarmatriz<<<grade, bloco>>>(Md, Nd, Rd, lM, lN);
    // Blocking copy doubles as synchronization with the kernel.
    cudaMemcpy(R, Rd, bytesR, cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Rd);
}
int checkGpu() {
int count;
cudaError_t erro;
cudaProfilerStart();
erro = cudaGetDeviceCount(&count);
if (erro != cudaSuccess) {
printf("Erro: %s\n", cudaGetErrorString(erro));
return 0;
}
if (count < 1) {
printf("Erro: %s\n", "Este computador não possui um dispositivo com GPU compatível com CUDA disponível.");
return 0;
}
return 1;
}
void matriz_preencher(float* A, int tam) {
for (int i = 0; i < tam*tam; i++)
A[i] = (float)(rand() % 100);
}
void matriz_exibir(float* A, int tam) {
for (int i = 0; i < tam; i++) {
for (int j = 0; j < tam; j++) {
printf("%0.2f ", A[tam*i+j]);
}
printf("\n");
}
}
int main(int argc, const char * argv[]){
float A[TAM_BLOCO*TAM_BLOCO];
float B[TAM_BLOCO*TAM_BLOCO];
float C[TAM_BLOCO*TAM_BLOCO];
int aA = TAM_BLOCO;
int lA = TAM_BLOCO;
int lB = TAM_BLOCO;
if (!checkGpu())
exit(EXIT_FAILURE);
srand(time(NULL));
matriz_preencher(A, aA);
matriz_preencher(B, aA);
printf("Matriz A\n");
matriz_exibir(A, aA);
printf("Matriz B\n");
matriz_exibir(B, aA);
multiplicar(A, B, C, aA, lA, lB);
printf("RESULTADO\n");
matriz_exibir(C, aA);
cudaDeviceReset();
exit(EXIT_SUCCESS);
} |
21,827 | /**
* Nearest neighbor search
* マップ内に店ゾーンが20%の確率で配備されている時、
* 住宅ゾーンから直近の店ゾーンまでのマンハッタン距離を計算する。
* Kd-treeなどのアルゴリズムだと、各住宅ゾーンから直近の店までの距離の計算にO(log M)。
* 従って、全ての住宅ゾーンについて調べると、O(N log M)。
* 一方、本実装では、各店ゾーンから周辺ゾーンに再帰的に距離を更新していくので、O(N)で済む。
* しかも、GPUで並列化することで、さらに計算時間を短縮できる。
*/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <time.h>
#define CITY_SIZE 400
#define NUM_GPU_BLOCKS 4
#define NUM_GPU_THREADS 32
#define NUM_FEATURES 1
struct ZoneType {
int type;
int level;
};
struct ZoningPlan {
ZoneType zones[CITY_SIZE][CITY_SIZE];
};
struct DistanceMap {
int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES];
};
struct Point2D {
int x;
int y;
__host__ __device__
Point2D() : x(0), y(0) {}
__host__ __device__
Point2D(int x, int y) : x(x), y(y) {}
};
__host__ __device__
unsigned int rand(unsigned int* randx) {
*randx = *randx * 1103515245 + 12345;
return (*randx)&2147483647;
}
__host__ __device__
float randf(unsigned int* randx) {
return rand(randx) / (float(2147483647) + 1);
}
__host__ __device__
float randf(unsigned int* randx, float a, float b) {
return randf(randx) * (b - a) + a;
}
__host__ __device__
int sampleFromCdf(unsigned int* randx, float* cdf, int num) {
float rnd = randf(randx, 0, cdf[num-1]);
for (int i = 0; i < num; ++i) {
if (rnd <= cdf[i]) return i;
}
return num - 1;
}
__host__ __device__
int sampleFromPdf(unsigned int* randx, float* pdf, int num) {
if (num == 0) return 0;
float cdf[40];
cdf[0] = pdf[0];
for (int i = 1; i < num; ++i) {
if (pdf[i] >= 0) {
cdf[i] = cdf[i - 1] + pdf[i];
} else {
cdf[i] = cdf[i - 1];
}
}
return sampleFromCdf(randx, cdf, num);
}
/**
* ゾーンプランを生成する。
*/
__host__
void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution, std::vector<Point2D>& hostStoreLocations) {
std::vector<float> numRemainings(zoneTypeDistribution.size());
for (int i = 0; i < zoneTypeDistribution.size(); ++i) {
numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i];
}
unsigned int randx = 0;
for (int r = 0; r < CITY_SIZE; ++r) {
for (int c = 0; c < CITY_SIZE; ++c) {
int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size());
zoningPlan.zones[r][c].type = type;
numRemainings[type] -= 1;
switch (type) {
case 0:
break;
case 1:
hostStoreLocations.push_back(Point2D(c, r));
break;
}
}
}
}
/**
* 直近の店までの距離を計算する(マルチスレッド版)
*/
__global__
void computeDistanceToStore(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// キュー
Point2D queue[1000];
int queue_begin = 0;
int queue_end = 0;
int stride = ceilf((float)(CITY_SIZE * CITY_SIZE) / NUM_GPU_BLOCKS / NUM_GPU_THREADS);
// 分割された領域内で、店を探す
for (int i = 0; i < stride; ++i) {
int r = (idx * NUM_GPU_BLOCKS * NUM_GPU_THREADS + i) / CITY_SIZE;
int c = (idx * NUM_GPU_BLOCKS * NUM_GPU_THREADS + i) % CITY_SIZE;
if (zoningPLan->zones[r][c].type == 1) {
queue[queue_end++] = Point2D(c, r);
distanceMap->distances[r][c][0] = 0;
}
}
// 距離マップを生成
while (queue_begin < queue_end) {
Point2D pt = queue[queue_begin++];
int d = distanceMap->distances[pt.y][pt.x][0];
if (pt.y > 0) {
int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y-1);
}
}
if (pt.y < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y+1);
}
}
if (pt.x > 0) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x-1, pt.y);
}
}
if (pt.x < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x+1, pt.y);
}
}
}
}
/**
* 直近の店までの距離を計算する(シングルスレッド版)
*/
__global__
void computeDistanceToStoreBySingleThread(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
Point2D queue[1000];
int queue_begin = 0;
int queue_end = 0;
for (int i = 0; i < CITY_SIZE * CITY_SIZE; ++i) {
int r = i / CITY_SIZE;
int c = i % CITY_SIZE;
if (zoningPLan->zones[r][c].type == 1) {
queue[queue_end++] = Point2D(c, r);
distanceMap->distances[r][c][0] = 0;
}
}
while (queue_begin < queue_end) {
Point2D pt = queue[queue_begin++];
int d = distanceMap->distances[pt.y][pt.x][0];
if (pt.y > 0) {
int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y-1);
}
}
if (pt.y < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y+1);
}
}
if (pt.x > 0) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x-1, pt.y);
}
}
if (pt.x < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x+1, pt.y);
}
}
}
}
int main()
{
time_t start, end;
ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan));
std::vector<Point2D> hostStoreLocations;
DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap));
DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap));
// 距離を初期化
memset(hostDistanceMap, 9999, sizeof(DistanceMap));
memset(hostDistanceMap2, 9999, sizeof(DistanceMap));
std::vector<float> zoneTypeDistribution(2);
zoneTypeDistribution[0] = 0.8f;
zoneTypeDistribution[1] = 0.2f;
// 初期プランを生成
// 同時に、店の座標リストを作成
start = clock();
generateZoningPlan(*hostZoningPlan, zoneTypeDistribution, hostStoreLocations);
end = clock();
printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
/*
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostZoningPlan->zones[r][c].type);
}
printf("\n");
}
printf("\n");
*/
// 初期プランをデバイスバッファへコピー
ZoningPlan* devZoningPlan;
if (cudaMalloc((void**)&devZoningPlan, sizeof(ZoningPlan)) != cudaSuccess) {
printf("memory allocation error!\n");
exit(1);
}
if (cudaMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), cudaMemcpyHostToDevice) != cudaSuccess) {
printf("memory copy error!\n");
exit(1);
}
// 距離マップ用に、デバイスバッファを確保
DistanceMap* devDistanceMap;
cudaMalloc((void**)&devDistanceMap, sizeof(DistanceMap));
///////////////////////////////////////////////////////////////////////
// シングルスレッドで、直近の店までの距離を計算
// 距離をデバイスバッファへコピー
cudaMemcpy(devDistanceMap, hostDistanceMap2, sizeof(DistanceMap), cudaMemcpyHostToDevice);
// スコアの直近の店までの距離を計算
start = clock();
computeDistanceToStoreBySingleThread<<<1, 1>>>(devZoningPlan, devDistanceMap);
end = clock();
printf("computeDistanceToStoreBySingleThread: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
// 距離をCPUバッファへコピー
cudaMemcpy(hostDistanceMap2, devDistanceMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost);
///////////////////////////////////////////////////////////////////////
// マルチスレッドで、直近の店までの距離を計算
// 距離をデバイスバッファへコピー
cudaMemcpy(devDistanceMap, hostDistanceMap, sizeof(DistanceMap), cudaMemcpyHostToDevice);
// スコアの直近の店までの距離を並列で計算
start = clock();
computeDistanceToStore<<<NUM_GPU_BLOCKS, NUM_GPU_THREADS>>>(devZoningPlan, devDistanceMap);
end = clock();
printf("computeDistanceToStore: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
// 距離をCPUバッファへコピー
cudaMemcpy(hostDistanceMap, devDistanceMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost);
// シングルスレッドとマルチスレッドの結果を比較
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
if (hostDistanceMap->distances[r][c][0] != hostDistanceMap2->distances[r][c][0]) {
printf("ERROR!\n");
}
}
}
printf("\n");
/*
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap->distances[r][c][0]);
}
printf("\n");
}
printf("\n");
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap2->distances[r][c][0]);
}
printf("\n");
}
printf("\n");
*/
// デバイスバッファの開放
cudaFree(devZoningPlan);
cudaFree(devDistanceMap);
// CPUバッファの開放
free(hostZoningPlan);
free(hostDistanceMap);
free(hostDistanceMap2);
cudaDeviceReset();
}
|
21,828 | #include <stdio.h>
#include "multigrid_kernel.cu"
#define N_MALLAS 12
#define BLOCK_SIZE 16
void gpu_imprime(Grid g, const char *);
void gpu_muestra(Grid g, const char *);
void multigrid(Grid *u,
Grid *f,
Grid *v,
Grid *d,
int m,
double *max,
double *def,
double *host_def);
int main(){
int i;
int dim;
int size;
int sizetotal=0;
double max=1.0;
double max_ant;
/* Definition of the Grids */
Grid u[N_MALLAS];
Grid f[N_MALLAS];
Grid v[N_MALLAS];
Grid d[N_MALLAS];
/* Double to compute max(defect) of each file */
double * gpu_def; //In GPU
double * host_def; //In Host
/* malloc */
for(i=0; i<N_MALLAS; i++){
dim = (int)pow(2,i+1)+1;
size = dim*dim;
u[i].d = dim;
f[i].d = dim;
v[i].d = dim;
d[i].d = dim;
u[i].size = size;
f[i].size = size;
v[i].size = size;
d[i].size = size;
cudaMalloc(&u[i].v, size*sizeof(double));
cudaMalloc(&v[i].v, size*sizeof(double));
cudaMalloc(&d[i].v, size*sizeof(double));
cudaMalloc(&f[i].v, size*sizeof(double));
sizetotal +=4*size;
}
int m = N_MALLAS - 1 ;
dim = (int)pow(2,m+1)+1;
size = dim*dim;
cudaMalloc(&gpu_def, size*sizeof(double));
sizetotal+=size;
host_def=(double*)malloc(size*sizeof(double));
printf("We need about %d Mb in the GPU\n", sizetotal*sizeof(double)/1024/1024);
/* To call CUDA */
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((dim+BLOCK_SIZE-1)/dimBlock.x, (dim+BLOCK_SIZE-1)/dimBlock.y);
/* Inicializamos la malla de la función */
cero<<<dimGrid, dimBlock>>>(f[m]);
/* Initialize u[m] with random values */
cero<<<dimGrid, dimBlock>>>(u[m]);
random<<<dimGrid, dimBlock>>>(u[m]);
/* main loop */
for(i=0; i<N_MALLAS; i++){
max_ant = max;
max = 0.0;
multigrid(&u[0],&f[0], &v[0], &d[0], m, &max, gpu_def, host_def);
printf("[Iteration #%d] nd=%e ratio=%f\n", i, max, max/max_ant);
}
/* Free memory */
for(i=0; i<N_MALLAS; i++){
cudaFree(u[i].v);
cudaFree(f[i].v);
cudaFree(v[i].v);
cudaFree(d[i].v);
}
cudaFree(gpu_def);
free(host_def);
return 0;
}
void gpu_imprime(Grid g, const char *nombre){
FILE *f;
f=fopen(nombre, "w");
int i,j;
double *hg;
hg = (double*)malloc(g.size*sizeof(double));
cudaMemcpy(hg,g.v, g.size*sizeof(double), cudaMemcpyDeviceToHost);
for(i=0; i< g.d; i++){
for(j=0; j< g.d; j++){
fprintf(f, "%d %d %f\n", i, j, hg[I(g.d,i,j)]);
}
fprintf(f,"\n");
}
fclose(f);
}
void gpu_muestra(Grid g, const char *nombre){
int i,j;
double *hg;
hg = (double *)malloc(g.size*sizeof(double));
cudaMemcpy(hg, g.v, g.size*sizeof(double), cudaMemcpyDeviceToHost);
printf("%s=\n", nombre);
for( i = 0 ; i<g.d; i++){
for(j=0; j<g.d; j++){
printf("%f ",hg[I(g.d,i,j)]);
}
printf("\n");
}
}
void multigrid(Grid *u,
Grid *f,
Grid *v,
Grid *d,
int m,
double *max,
double *def,
double *host_def)
{
int dim;
int dim_;
int i;
/* Primer caso, malla 0, solución */
if(m == 0){
exacta<<<1,1>>>(u[m],f[m]);
}
else{ /* Some definitions to call cuda */
dim = (int)pow(2,m+1)+1;
dim_ = (int)pow(2,m)+1;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((dim+BLOCK_SIZE-1)/dimBlock.x, (dim+BLOCK_SIZE-1)/dimBlock.y);
dim3 dimGrid_((dim_+BLOCK_SIZE-1)/dimBlock.x,(dim_+BLOCK_SIZE-1)/dimBlock.y);
/* some grids == 0 */
cero<<<dimGrid , dimBlock>>>(v[m]);
cero<<<dimGrid , dimBlock>>>(d[m]);
cero<<<dimGrid_, dimBlock>>>(u[m-1]);
cero<<<dimGrid_, dimBlock>>>(f[m-1]);
/* R-N smoothing */
suaviza_r<<<dimGrid, dimBlock>>>(u[m],f[m]);
suaviza_n<<<dimGrid, dimBlock>>>(u[m],f[m]);
/* Compute the defect */
defecto<<<dimGrid, dimBlock>>>(u[m],f[m],d[m]);
/* Defect from d[m] to f[m-1] */
restringe<<<dimGrid_, dimBlock>>>(d[m],f[m-1]);
/* Call to multigrid */
multigrid(&u[0],&f[0],&v[0],&d[0],m-1,max, def, host_def);
/* Interpolate from u[m-1] to v[m] */
interpola<<<dimGrid_, dimBlock>>>(u[m-1], v[m]);
/* Sum */
suma<<<dimGrid, dimBlock>>>(u[m],v[m]);
/* R-N smoothing */
suaviza_r<<<dimGrid, dimBlock>>>(u[m],f[m]);
suaviza_n<<<dimGrid, dimBlock>>>(u[m],f[m]);
/* If we're in the upper grid, check the defect */
if(m==N_MALLAS-1){
defecto<<<dimGrid, dimBlock>>>(u[m],f[m],d[m]);
dim3 dg((dim+BLOCK_SIZE-1)/dimBlock.x,1);
dim3 db(BLOCK_SIZE,1);
/* compute the max or each row */
maxx<<<dg, db>>>(d[m],def);
/* copy the vector to the host */
cudaMemcpy(host_def, def, dim*dim*sizeof(double), cudaMemcpyDeviceToHost);
max[0]=0.0;
for(i=0;i<dim;i++){
if(max[0]<host_def[i])
max[0]=host_def[i];
}
}
}
}
|
21,829 | #include <stdio.h>
#define N 16
#define BLOCK_SIZE 32 < N ? 32 : N
void matrixMultCPU(int a[N][N], int b[N][N], int c[N][N]) {
int n,m;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
int sum = 0;
for (int k = 0; k < N; k++) {
m = a[i][k];
n = b[k][j];
sum += m * n;
}
c[i][j] = sum;
}
}
}
__global__ void matrixMultGPU(int *a, int *b, int *c) {
int k, sum = 0;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int fil = threadIdx.y + blockDim.y * blockIdx.y;
__shared__ float A[BLOCK_SIZE][N];
__shared__ float B[BLOCK_SIZE][N];
for (int i = threadIdx.x; i < N; i+=blockDim.x){
A[threadIdx.y][i] = a[fil*N + i];
}
for (int i = threadIdx.y; i < N; i+=blockDim.y){
B[threadIdx.x][i] = b[i*N + col];
}
__syncthreads();
if (col < N && fil < N) {
// #pragma unroll
for (k = 0; k < N; k++) {
sum += A[threadIdx.y][k] * B[threadIdx.x][k];
}
c[fil * N + col] = sum;
}
}
int main() {
int a[N][N], b[N][N], c[N][N], d[N][N];
int *dev_a, *dev_b, *dev_c;
int cont,i,j;
/* inicializando variables con datos*/
for (i = 0; i < N; i++) {
cont = 0;
for (j = 0; j < N; j++) {
a[i][j] = cont;
b[i][j] = cont;
cont++;
}
}
int size = N * N * sizeof(int);
cudaMalloc((void **) &dev_a, size);
cudaMalloc((void **) &dev_b, size);
cudaMalloc((void **) &dev_c, size);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
dim3 dimGrid((N+32-1)/32, (N+32-1)/32);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
cudaEventCreate(&start);
cudaEvent_t stop;
cudaEventCreate(&stop);
// Record the start event
cudaEventRecord(start, NULL);
// Repita la ejecucion del kernel 1000 veces para eliminar
// efectos de arranque en frio
int nIter = 1000;
for (int j = 0; j < nIter; j++)
matrixMultGPU<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c);
// Record the stop event
cudaEventRecord(stop, NULL);
// Wait for the stop event to complete
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
// Compute and print the performance
float msecPerKernelExecution = msecTotal / nIter;
double flopsPerMMul = 2.0 * N * N * N;
double gigaFlops = (flopsPerMMul * 1.0e-9f) /
(msecPerKernelExecution / 1000.0f);
printf("GFlops: %lf\n", gigaFlops);
printf("TPKernel: %lf\n", msecPerKernelExecution);
printf("Size: %d\n", N);
matrixMultCPU(a,b,d);
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// comprobando
for (int y = 0; y < N; y++) {
for (int x = 0; x < N; x++) {
if (c[y][x] != d[y][x]){
printf("ERROR en %d %d, %d != %d\n", y,x,c[y][x], d[y][x]);
return 1;
}
}
}
printf("SUCCESS\n");
return 0;
} |
21,830 | #include <cuda.h>
#include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define N 64
#define K 3
#define THPERBLOCK 32
#define ITER 100
typedef struct Data {
float* x;
float* y;
} data;
data* read_data(const char* file) {
data* d = NULL;
FILE* f = fopen (file, "r");
/*
int num_node;
if (fscanf(f, "%5d\n", &num_node) == 0) {
printf("read inputfile failed.\n");
return NULL;
}
printf("num_node = %d\n", num_node);
*/
d = (data*) malloc(sizeof(data));
d->x = (float*)malloc(N * sizeof(float));
d->y = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
fscanf(f, "%f %f\n", &(d->x[i]), &(d->y[i]));
}
printf("Point --------------------------------------\n");
for (int i = 0; i < N; i++) {
printf("%d: x = %f, y = %f\n", i, d->x[i], d->y[i]);
}
printf("\n");
fclose(f);
return d;
}
data* read_cent(const char* file) {
data* d = NULL;
FILE* f = fopen (file, "r");
d = (data*) malloc(sizeof(data));
d->x = (float*)malloc(K * sizeof(float));
d->y = (float*)malloc(K * sizeof(float));
for (int i = 0; i < K; i++) {
fscanf(f, "%f %f\n", &(d->x[i]), &(d->y[i]));
}
printf("Cluster location ---------------------------\n");
for (int i = 0; i < K; i++) {
printf("%d: x = %f, y = %f\n", i, d->x[i], d->y[i]);
}
printf("\n");
fclose(f);
return d;
}
float dist_err(const float x, const float y, const float cx, const float cy) {
return sqrt(pow(x - cx, 2.0) + pow(y - cy, 2.0));
}
float calculate_err(float* prev_cx, float* prev_cy, float* cx_result, float* cy_result) {
float err = 0;
for (int i = 0; i < K; i++) {
err += dist_err(prev_cx[i], prev_cy[i], cx_result[i], cy_result[i]);
prev_cx[i] = cx_result[i];
prev_cy[i] = cy_result[i];
}
err /= K;
return err;
}
__device__ float dist(const float x, const float y, const float cx, const float cy) {
return sqrtf(powf(x - cx, 2.0) + powf(y - cy, 2.0));
}
__global__ void cluster_assign(const float* x, const float* y, float* cx, float* cy, int* cluster_label) {
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= N) {
return;
}
float min_dist = 99999999;
int closest_cent = 0;
for (int i = 0; i < K; i++) {
float distance = dist(x[id], y[id], cx[i], cy[i]);
if (distance < min_dist) {
min_dist = distance;
closest_cent = i;
}
}
cluster_label[id] = closest_cent;
}
__global__ void cent_update(const float* x, const float* y, float* cx, float* cy, int* cluster_label, int* cluster_size) {
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= N) {
return;
}
const int s_id = threadIdx.x;
__shared__ float s_x[THPERBLOCK];
__shared__ float s_y[THPERBLOCK];
__shared__ int s_cluster_label[THPERBLOCK];
s_x[s_id] = x[id];
s_y[s_id] = y[id];
s_cluster_label[s_id] = cluster_label[id];
__syncthreads();
if (s_id == 0) {
float cent_x_sum[K] = { 0 };
float cent_y_sum[K] = { 0 };
int cent_cluster_size[K] = { 0 };
for (int i = 0; i < blockDim.x; i++) {
int cluster_id = s_cluster_label[i];
cent_x_sum[cluster_id] += s_x[i];
cent_y_sum[cluster_id] += s_y[i];
cent_cluster_size[cluster_id]++;
}
for (int i = 0; i < K; i++) {
atomicAdd(&cx[i], cent_x_sum[i]);
atomicAdd(&cy[i], cent_y_sum[i]);
atomicAdd(&cluster_size[i], cent_cluster_size[i]);
}
}
__syncthreads();
if (id < K) {
cx[id] = cx[id] / cluster_size[id];
cy[id] = cy[id] / cluster_size[id];
}
}
void print_result (const float* cx, const float* cy, const int* label) {
printf("\t");
for (int i = 0; i < N; i++) {
printf("%3d", i);
}
printf("\nLabel : ");
for (int i = 0; i < N; i++) {
printf("%3d", label[i]);
}
for (int i = 0; i < K; i++) {
printf("\ncentroid %d\t :%5f %5f", i, cx[i], cy[i]);
}
printf("\n");
}
int main (int argc, char** argv) {
if (argc != 3) {
printf("Usage: %s <nodes file> <centroids file>\n", argv[0]);
return 0;
} else {
data* d = read_data(argv[1]);
data* c = read_cent(argv[2]);
float* cx_result = (float*)malloc(K * sizeof(float));
float* cy_result = (float*)malloc(K * sizeof(float));
int* result = (int*) malloc(N * sizeof(int));
printf("Let's do CLUSTER!\n");
float* d_x; // for cudamalloc in 'nodes data'
float* d_y;
int* d_cluster_label;
float* d_cx;
float* d_cy;
float* d_cx_result; // result centroid
float* d_cy_result;
int* d_cluster_size;
cudaMalloc((void**) &d_x, N * sizeof(float));
cudaMalloc((void**) &d_y, N * sizeof(float));
cudaMalloc((void**) &d_cluster_label, N * sizeof(int));
cudaMalloc((void**) &d_cx, K * sizeof(float));
cudaMalloc((void**) &d_cy, K * sizeof(float));
cudaMalloc((void**) &d_cx_result, K * sizeof(float));
cudaMalloc((void**) &d_cy_result, K * sizeof(float));
cudaMalloc((void**) &d_cluster_size, K * sizeof(int));
cudaMemcpy(d_x, d->x, N * sizeof(float), cudaMemcpyHostToDevice); // node memcpy
cudaMemcpy(d_y, d->y, N * sizeof(float), cudaMemcpyHostToDevice); // centroid memcpy
cudaMemcpy(d_cx, c->x, K * sizeof(float), cudaMemcpyHostToDevice); // node memcpy
cudaMemcpy(d_cy, c->y, K * sizeof(float), cudaMemcpyHostToDevice); // centroid memcpy
int cur_iter = 1;
float err = 1;
float* prev_cx = (float*)malloc(K * sizeof(float));
float* prev_cy = (float*)malloc(K * sizeof(float));
for (int i = 0; i < K; i++) {
prev_cx[i] = c->x[i];
prev_cy[i] = c->y[i];
}
while (cur_iter < ITER) {
cluster_assign<<<(N + THPERBLOCK - 1)/THPERBLOCK, THPERBLOCK>>>(d_x, d_y, d_cx, d_cy, d_cluster_label);
// cudaMemcpy(cx_result, d_cx, K * sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(cy_result, d_cy, K * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemset(d_cx, 0.0, K * sizeof(float));
cudaMemset(d_cy, 0.0, K * sizeof(float));
cudaMemset(d_cluster_size, 0, K * sizeof(int));
cent_update<<<(N + THPERBLOCK - 1)/THPERBLOCK, THPERBLOCK>>>(d_x, d_y, d_cx, d_cy, d_cluster_label, d_cluster_size);
cudaMemcpy(cx_result, d_cx, K * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cy_result, d_cy, K * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < K; i++) {
printf("Iter %d:\tcent %d\t x:\t%f\ty:\t%f\n", cur_iter, i, cx_result[i], cy_result[i]);
}
printf("\n");
err = calculate_err(prev_cx, prev_cy, cx_result, cy_result);
if (err < 0.0001) {
break;
}
cur_iter++;
}
cudaMemcpy(result, d_cluster_label, N * sizeof(int), cudaMemcpyDeviceToHost); // cuda memcopy d to h
// cudaMemcpy(cx_result, d_cx_result, K * sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(cy_result, d_cy_result, K * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_cluster_label);
cudaFree(d_cx);
cudaFree(d_cy);
cudaFree(d_cx_result);
cudaFree(d_cy_result);
cudaFree(d_cluster_size);
printf("print the result!\n");
print_result(cx_result, cy_result, result);
// free(d);
// free(c);
free(cx_result);
free(cy_result);
free(result);
free(prev_cx);
free(prev_cy);
}
return 0;
}
/*
// TODO
void free_data(data* d) {
}
*/
|
21,831 | #include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
cudaError_t _m_cudaStat = value; \
if ( _m_cudaStat != cudaSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
cudaGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( cudaGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( cudaSetDevice( i ) );
cudaDeviceProp deviceProp;
CUDA_CHECK_RETURN( cudaGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
cudaDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
}
|
21,832 | #include<stdio.h>
#include<stdlib.h>
//#include<string.h>
#include<math.h>
#include<cuda_runtime.h>
#define INF (64 * 64 * 128 * 2)
#define N_FEATURE (128)
typedef float fv[N_FEATURE];
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__global__ static void kmeans_cluster(int max_iter, fv *points,fv *center, int n_line, int n_clusters){
int i,j,k,l,q;
extern __shared__ unsigned int cluster[];
__shared__ int p;
float m,n;
fv tfeature;
for(l = 0; l < max_iter;l++){
if(threadIdx.x == 0)
p = 0;
__syncthreads();
for(i = threadIdx.x;i < n_line;i+=blockDim.x){
//memcpy(tfeature,points[i],sizeof(float)*N_FEATURE);
for(j = 0; j < N_FEATURE;j++)
tfeature[j] = points[i][j];
n = INF;
for(j = 0; j < n_clusters;j++){
m = 0;
for(k = 0;k < N_FEATURE;k++)
m += (tfeature[k] - center[j][k]) * (tfeature[k] - center[j][k]);
if(m < n){
q = j;
n = m;
}
}
//sum += n;
if(q != cluster[i]){
cluster[i] = q;
if(p == 0)
atomicAdd(&p, 1);
}
}
__syncthreads();
if(p == 0){
return ;
}
for(i = threadIdx.x;i < n_clusters;i+=blockDim.x){
for(j = 0; j < N_FEATURE;j++)
tfeature[j] = 0;
n = 0;
for (j = 0; j < n_line;j++)
if(cluster[j] == i){
n++;
for(k = 0;k < N_FEATURE;k++)
tfeature[k] += points[j][k];
}
if(n > 0)
for(j = 0; j < N_FEATURE;j++)
tfeature[j] /= n;
//really need else to add point
tfeature[0] = n;
//memcpy(center[i],tfeature,sizeof(float)*N_FEATURE);
for(j = 0; j < N_FEATURE;j++)
center[i][j] = tfeature[j];
}
}
}
int main(int argc, char* argv[]){
float sum;
int n_clusters = atoi(argv[1]);
fv* points;
fv center[n_clusters];
int n_line = 0, i, j, k, l, p, q;
int tab[n_clusters];
float m, n;
char str[256];
int b_continue = 0;
if(argc > 4 && strcmp(argv[4],"-c") == 0)
b_continue = 1;
FILE *fp_list = fopen(argv[2], "r"), *fp_file;
fgets(str, 256, fp_list);
while(!feof(fp_list)){
str[strlen(str) - 1] = '\0';
fp_file = fopen(str, "r");
fscanf(fp_file, "%*d%d", &j);
n_line += j;
fclose(fp_file);
fgets(str, 256, fp_list);
}
points = (fv*)malloc(sizeof(float) * N_FEATURE * n_line);
printf("total %d lines\n",n_line);
l = 0;
rewind(fp_list);
fgets(str, 256, fp_list);
while(!feof(fp_list)){
str[strlen(str) - 1] = '\0';
fp_file = fopen(str, "r");
fscanf(fp_file, "%*d%d", &j);
for(i = 0;i < j;i++){
fscanf(fp_file, "%*f%*f%*f%*f%*f");
q = 0;
for(k = 0;k < N_FEATURE;k++){
fscanf(fp_file, "%d", &p);
q += p;
points[l + i][k] = sqrtf(p); // sqrt it
}
m = sqrtf(q);
if(q > 0)
for(k = 0;k < N_FEATURE;k++){
points[l + i][k] /= m; //l2 normal
}
}
l += j;
fclose(fp_file);
fgets(str, 256, fp_list);
}
fclose(fp_list);
puts("data load done!");
//init center
if(b_continue){
fp_list = fopen(argv[3], "r");
for(i = 0;i < n_clusters;i++){
for(j = 0;j < N_FEATURE;j++)
fscanf(fp_list, "%f",¢er[i][j]);
}
fclose(fp_list);
}
else{
for(i = 0;i < n_clusters;i++)
for(j = 0;j < N_FEATURE;j++)
center[i][j] = random() / (float) RAND_MAX;
}
//kmeans
fv* dev_points;
fv* dev_center;
HANDLE_ERROR( cudaMalloc( (void**)&dev_center, n_clusters * N_FEATURE * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_points, n_line * N_FEATURE * sizeof(float) ) );
HANDLE_ERROR( cudaMemcpy( dev_center, center, n_clusters * N_FEATURE * sizeof(float), cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_points, points, n_line * N_FEATURE * sizeof(float), cudaMemcpyHostToDevice ) );
puts("running cuda.");
kmeans_cluster<<<1,2,n_line * sizeof(float)>>>(1000,dev_points,dev_center,n_line,n_clusters);
HANDLE_ERROR( cudaMemcpy( center, dev_center, n_clusters * N_FEATURE * sizeof(float), cudaMemcpyDeviceToHost ) );
fp_list = fopen(argv[3], "w");
for(i = 0;i < n_clusters;i++){
for(j = 0;j < N_FEATURE;j++)
fprintf(fp_list, "%f ",center[i][j]);
fprintf(fp_list,"\n");
}
fclose(fp_list);
free(points);
HANDLE_ERROR( cudaFree( dev_points ) );
HANDLE_ERROR( cudaFree( dev_center ) );
return p;
}
|
21,833 | #include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <time.h>
#define BLOCK_WIDTH 16
#define TILE_WIDTH 16
#define width 2
//GlobalMem - From Kirk and Hwu, 2012,
__global__ void matrixMulKernel(float* d_M, float* d_N, float* d_P, int Width) {
// Calculate the row index of the d_Pelement and d_M
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Calculate the column index of d_P and d_N
int Col = blockIdx.x*blockDim.x+threadIdx.x;
if ((Row < Width) && (Col < Width)) {
float Pvalue = 0;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < Width; ++k) {
Pvalue += d_M[Row*Width+k]*d_N[k*Width+Col];
}
d_P[Row*Width+Col] = Pvalue;
}
}
//
__global__ void matrixMulKernel2(float* d_M, float* d_N, float* d_P,
int Width) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Coolaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + m*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(m*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
//Main
int main(void){
//Print device properties
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" MaxThreadPerBlock: %d\n",
prop.maxThreadsPerBlock);
printf(" MaxThreadsDim0: %d\n",
prop.maxThreadsDim[0]);
printf(" MaxThreadsDim1: %d\n",
prop.maxThreadsDim[1]);
printf(" MaxThreadsDim2: %d\n",
prop.maxThreadsDim[2]);
printf(" MaxGridSize: %d\n",
prop.maxGridSize[1]);
printf(" Warp Size: %d\n",
prop.warpSize);
}
//Allocate memory in host RAM
float *A_h, *B_h, *C_h;
cudaMallocHost((void **) &A_h, (width*width)*sizeof(float));
cudaMallocHost((void **) &B_h, (width*width)*sizeof(float));
cudaMallocHost((void **) &C_h, (width*width)*sizeof(float));
//Allocate memory in device RAM
float *A_d, *B_d, *C_d;
cudaMalloc((void **) &A_d, (width*width)*sizeof(float));
cudaMalloc((void **) &B_d, (width*width)*sizeof(float));
cudaMalloc((void **) &C_d, (width*width)*sizeof(float));
//Populate First Matrix
int i, j;
srand(1);
for (i = 0; i < width; i++){
for (j = 0; j < width; j++) {
A_h[i*width + j] = ((float)rand()/(float)(RAND_MAX)) * 100;
printf("%.2f ", A_h[i*width + j]);
}
printf("\n");
}
printf("\n");
//Populate Second Matrix
for (i = 0; i < width; i++){
for (j = 0; j < width; j++) {
B_h[i*width + j] = ((float)rand()/(float)(RAND_MAX)) * 100;
printf("%.2f ", B_h[i*width + j]);
}
printf("\n");
}
//Mem copy from host to device
cudaMemcpy(A_d, A_h, (width*width)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, (width*width)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(C_d, C_h, (width*width)*sizeof(float), cudaMemcpyHostToDevice);
//From Kirk and Hwu, 2012
int NumBlocks = width/BLOCK_WIDTH;
if (width % BLOCK_WIDTH) NumBlocks++;
dim3 dimGrid(NumBlocks, NumBlocks);
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
// matrixMulKernel<<<dimGrid, dimBlock>>>(A_d, B_d, C_d, width);
matrixMulKernel2<<<dimGrid, dimBlock>>>(A_d, B_d, C_d, width);
//Mem Copy
cudaMemcpy(C_h, C_d, (width*width)*sizeof(float), cudaMemcpyDeviceToHost);
//Print matrix A
for (i = 0; i < width; i++){
for (j = 0; j < width; j++) {
printf("%.2f ", C_h[i*width + j]);
}
printf("\n");
}
printf("\n");
//Free up memory
cudaFree(A_h);
cudaFree(B_h);
cudaFree(C_h);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
}
|
21,834 | #include <stdio.h>
#include <assert.h>
#define ARRAY_SIZE 5
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// Kernel definition
// Element-wise vector add: d_c[i] = d_a[i] + d_b[i].
// Expects a single-block launch with at least ARRAY_SIZE threads; one thread
// handles exactly one element, so no bounds guard is present.
__global__ void addKernel(int* d_a, int* d_b, int* d_c)
{
    const int idx = threadIdx.x;
    d_c[idx] = d_a[idx] + d_b[idx];
}
// Runs the element-wise add on the GPU.
// h_a, h_b: host input arrays of ARRAY_SIZE ints; h_c: host output buffer.
// Fixes two bugs in the original:
//  1. The kernel was launched with the HOST pointers h_a/h_b (illegal device
//     dereference) — device copies d_a/d_b are now allocated and filled.
//  2. The result copy used &h_c (the address of the pointer variable itself),
//     clobbering the stack instead of filling the caller's buffer.
void onDevice(int* h_a, int* h_b, int* h_c){
    int *d_a, *d_b, *d_c;
    // allocate memory on the device
    cudaMalloc( (void**)&d_a, ARRAY_BYTES );
    cudaMalloc( (void**)&d_b, ARRAY_BYTES );
    cudaMalloc( (void**)&d_c, ARRAY_BYTES );
    // copy inputs Host -> Device
    cudaMemcpy( d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice );
    addKernel<<<1, ARRAY_SIZE>>>(d_a, d_b, d_c);
    // copy result Device -> Host (blocking, so no explicit sync needed)
    cudaMemcpy( h_c, d_c, ARRAY_BYTES, cudaMemcpyDeviceToHost );
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );
}
// Host driver: builds two all-ones input vectors, runs the device add,
// and verifies every output element equals the element-wise sum.
void onHost()
{
    int h_a[ARRAY_SIZE];
    int h_b[ARRAY_SIZE];
    // output buffer lives on the heap
    int *h_c = (int*)malloc(ARRAY_BYTES);
    for (int idx = 0; idx < ARRAY_SIZE; idx++) {
        h_a[idx] = 1;
        h_b[idx] = 1;
        h_c[idx] = 0;
    }
    onDevice(h_a, h_b, h_c);
    for (int idx = 0; idx < ARRAY_SIZE; idx++) {
        printf("%i \n",h_a[0]);
        assert(h_a[idx] + h_b[idx] == h_c[idx]);
    }
    printf("-: successful execution :-\n");
    free(h_c);
}
// Entry point: all work is delegated to the host-side driver.
int main()
{
    onHost();
    return 0;
}
|
21,835 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
using namespace std;
#define block_size 32
#define pl_end_number 1000000
#define vector_size 1000
// Element-wise vector add c = a + b (the name is a leftover from the
// prime-sieve experiment this file belongs to). The guard is required
// because the grid may launch more threads than vector_size elements.
__global__ void prime( int *a, int *b, int *c ) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= vector_size) return;
    c[tid] = a[tid] + b[tid];
}
// ********************** MAIN FUNCTION **********************
// Driver: builds a prime sieve on the CPU (timed with CUDA events), collects
// the primes, then sets up (mostly commented-out) scaffolding for a GPU pass.
// NOTE(review): several allocations (small_sieve, prime_list, input_list,
// dev_il, dev_pl) are never freed, and the GPU kernel launch is disabled.
int main( void ) {
cout << "Program Start" << endl;
cudaSetDevice(0);
// Time Variables
cudaEvent_t start, stop;
float time;
cudaEventCreate (&start);
cudaEventCreate (&stop);
// sieve[i] == true means "i is (still considered) prime"
bool *small_sieve = new bool [pl_end_number];
int bool_size = sizeof(bool);
for (unsigned long long int i = 0; i < pl_end_number; i++) {
small_sieve[i] = true;
}
cudaEventRecord(start,0);
// Trial-division sieve. NOTE(review): the inner bound `j <= pl_end_number`
// writes small_sieve[pl_end_number], one past the end of the allocation
// (valid indices are 0..pl_end_number-1) — out-of-bounds write.
for (unsigned long long int i = 2; i <= int(sqrt(pl_end_number))+1; i++) {
for (unsigned long long int j = i+1; j <= pl_end_number; j++) {
if (j % i == 0) {
small_sieve[j] = false;
//cout << j << " is Composite, as divisible by " << i << endl;
}
}
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tPrime Numbers Computation Time on CPU: %.2f ms\n", time);
// cout << "Primes till 100\n";
// Count surviving primes. NOTE(review): `i <= pl_end_number` reads one
// element past the end of small_sieve as well.
unsigned long long int small_sieve_counter = 0;
for (unsigned long long int i = 2; i <= pl_end_number; i++) {
if (small_sieve[i] == true) {
//cout << i << " ";
small_sieve_counter++;
//cout << small_sieve[i] << " ";
}
}
cout << endl;
// Compact the sieve into an explicit list of primes.
unsigned long long int *prime_list = new unsigned long long int [small_sieve_counter];
unsigned long long int inner_counter = 0;
for (unsigned long long int i = 2; i <= pl_end_number; i++) {
if (small_sieve[i] == true) {
prime_list[inner_counter] = i;
inner_counter++;
}
}
// Pointers in GPU memory
int *dev_il;
int *dev_pl;
// Create Input list
// NOTE(review): start_number is computed but never used.
unsigned long long int start_number = pl_end_number+1;
// NOTE(review): pl_end_number*pl_end_number is evaluated in (signed) int
// arithmetic before widening — 10^12 overflows 32-bit int (UB). Even if it
// did not, `new bool[10^12]` would try to allocate ~1 TB.
unsigned long long int il_size = pl_end_number*pl_end_number;
bool *input_list = new bool [il_size];
for (unsigned long long int i =0; i < il_size; i++) {
input_list[i] = true;
}
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_il, vector_size * bool_size );
cudaMalloc( (void**)&dev_pl, small_sieve_counter * bool_size );
// copy the arrays 'a' and 'b' to the GPU
// cudaMemcpy( dev_a, a, vector_size * sizeof(int),
// cudaMemcpyHostToDevice );
// cudaMemcpy( dev_b, b, vector_size * sizeof(int),
// cudaMemcpyHostToDevice );
//
// GPU Calculation
////////////////////////
// printf("Running parallel job.\n");
// ceil-div launch configuration for the (disabled) kernel below
int grid_size = (vector_size-1)/block_size;
grid_size++;
cudaEventRecord(start,0);
//prime<<<grid_size,block_size>>>( dev_a, dev_b, dev_c);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// cudaMemcpy( c_gpu, dev_c, vector_size * sizeof(int),
// cudaMemcpyDeviceToHost );
// compare the results
// int error = 0;
// for (int i = 0; i < vector_size; i++) {
// if (c_cpu[i] != c_gpu[i]){
// error = 1;
// // printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
// }
// if (error) break;
// }
// if (error == 0){
// printf ("Correct result. No errors were found.\n");
// }
// free the memory allocated on the GPU
// cudaFree( dev_a );
// cudaFree( dev_b );
// cudaFree( dev_c );
// free(a);
// free(b);
// free(c_cpu);
// free(c_gpu);
return 0;
}
|
21,836 | //pass
//--blockDim=[8,8] --gridDim=[1,1] --no-inline
#include <cuda.h>
#define _2D_ACCESS(A, y, x, X_DIM) A[(y)*(X_DIM)+(x)]
#define X_DIMENSION 0
#define Y_DIMENSION 1
#define BLOCK_DIM (1 << 3)
#define num_vertices (1 << 6)
#define _U 0
#define _I 2
// Stage 1 of a blocked transitive-closure computation: processes the
// diagonal BLOCK_DIM x BLOCK_DIM tile selected by `passnum`, entirely in
// shared memory. Launch with an [BLOCK_DIM, BLOCK_DIM] block (one thread per
// tile cell). _U marks "unreachable"; a reachable cell stores the index of
// an intermediate vertex offset by _I.
__global__ void transitive_closure_stage1_kernel(unsigned int* graph, int passnum)
{
__shared__ unsigned int primary_block_buffer[BLOCK_DIM][BLOCK_DIM];
// Global coordinates of this thread's cell inside the diagonal tile.
int idxY = passnum * BLOCK_DIM + threadIdx.y;
int idxX = passnum * BLOCK_DIM + threadIdx.x;
primary_block_buffer[threadIdx.y][threadIdx.x] = _2D_ACCESS(graph, idxY, idxX, num_vertices);
__syncthreads();
// Floyd-Warshall-style sweep over intermediate vertices k within the tile.
// The barrier at the end of each iteration is reached by ALL threads
// (the `if` only guards the update), so the loop is sync-safe.
for (unsigned int k = 0; k < BLOCK_DIM; ++k)
{
if ( primary_block_buffer[threadIdx.y][threadIdx.x] == _U)
{
if ( (primary_block_buffer[threadIdx.y][k] != _U) && (primary_block_buffer[k][threadIdx.x] != _U) )
{
// Record the global index of the intermediate vertex (shifted by _I).
primary_block_buffer[threadIdx.y][threadIdx.x] = passnum*BLOCK_DIM + k + _I;
}
}
__syncthreads();
}
// Write the updated tile back to global memory.
_2D_ACCESS(graph, idxY, idxX, num_vertices) = primary_block_buffer[threadIdx.y][threadIdx.x];
}
|
21,837 | #include <stdio.h>
// Device-side hello: every thread prints its block and thread index.
// Output is buffered on the device and flushed at synchronization points.
__global__ void helloFromGPU(void) {
printf("Hello World from GPU, blockIdx: %d threadIdx: %d\n", blockIdx.x, threadIdx.x);
}
// Demonstrates that kernel launches are asynchronous: the 1024*10 device
// printf lines are queued at launch and the host continues immediately, so
// "CPU2" can print before the GPU output appears.
int main(void) {
printf("Hello World from CPU1\n");
helloFromGPU<<<1024, 10>>>();
// Left commented out on purpose: uncommenting forces the device output to
// be flushed before the next host printf.
//cudaDeviceSynchronize();
printf("Hello World from CPU2\n");
// Destroys the CUDA context; device printf output still buffered at this
// point is flushed as part of context teardown.
cudaDeviceReset();
return 0;
} |
21,838 | #include "includes.h"
// Intentionally empty kernel — presumably a marker launch for profilers; no device work is performed.
__global__ void profileLevelUp_kernel() {} |
21,839 |
#include "GPUTSPSolverKernel.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <math.h>
// Aborts the process with a diagnostic if a CUDA runtime call failed.
// `work`: the status returned by the call; `msg`: caller-supplied context string.
void safeCuda(cudaError work, const char *msg) {
    if (work == cudaSuccess) {
        return;
    }
    printf("CUDA ERROR at (%s) with code %d\n", msg, work);
    exit(EXIT_FAILURE);
}
// Host wrapper: seeds the single shared curandState with a <<<1,1>>> kernel.
void d_initRandom(unsigned long seed, curandState *pState) {
d_initRandomKernel << <1, 1 >> >(seed, pState);
}
// Initializes one curandState (subsequence 0, offset 0); run as <<<1,1>>>.
__global__ void d_initRandomKernel(unsigned long seed, curandState *pState) {
curand_init(seed, 0, 0, pState);
}
// Returns a pseudo-random unsigned int in [min, max]; `add` perturbs the raw
// curand draw (callers pass a thread-derived value).
// NOTE(review): all threads share *pState and this read-modify-write is not
// atomic, so concurrent callers race on the state — confirm this is accepted.
__device__ unsigned int randi(curandState *pState, int add, int min, int max) {
curandState localState = *pState;
unsigned int rndval = min + (curand(&localState) * add + add)%(max-min+1);
*pState = localState;
return rndval;
}
// Returns a pseudo-random float in [min, max], quantized to steps of
// (max-min)/100000. Shares *pState with randi (same race caveat applies).
__device__ float randf(curandState *pState, int add, float min, float max) {
curandState localState = *pState;
float rndval = min + (((curand(&localState) * add + add)%100000)/100000.0)*(max - min);
*pState = localState;
return rndval;
}
// gene initialization
// Host wrapper: launches gene initialization (one thread per gene) and logs
// start/end. `cityLoc` is forwarded but unused by the kernel.
void d_geneInit(int blocks, int threads, curandState *pState, unsigned int nPopulation, unsigned int numCities, int *gene, const float *cityLoc) {
printf("gene initialization at device started....\n");
d_geneInitKernel << < blocks, threads >> >(pState, nPopulation, numCities, gene, cityLoc);
printf("gene initialization at device done\n");
}
// Initializes each gene (tour) with the identity permutation 0..numCities-1
// and then shuffles positions 1..numCities-1 (city 0 stays fixed as the
// tour start). One thread per gene.
__global__ void d_geneInitKernel(curandState_t *pstate, unsigned int nPopulation, unsigned int numCities, int *gene, const float *cityLoc) {
// tId: gene idx ( tId-th gene with numCities elements)
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId >= nPopulation) return;
// gene initialization - cards straight
for (int i = 0; i < numCities; i++) {
gene[tId*numCities+i] = i;
}
// gene shuffle: swap each position with a random position in [1, numCities-1]
for (int i = 1; i < numCities; i++) {
int rIdx = randi(pstate, threadIdx.x+blockIdx.x+blockDim.x, (unsigned int) 1, (unsigned int) numCities - 1);
int t = gene[tId*numCities + i];
gene[tId * numCities + i] = gene[tId * numCities + rIdx];
gene[tId * numCities + rIdx] = t;
}
}
// compute fitness of a specific (idx) gene
// Host wrapper: computes the per-edge distances of gene `idx` on the device.
void d_computeFitnessOf(int blocks, int threads, int idx, float *cityLoc, int *gene, int nPopulation, int nCities, int *fitness, int *distance) {
d_fitnessOf << < blocks, threads >> > (idx, cityLoc, gene, nPopulation, nCities, fitness, distance);
}
// Computes, one thread per tour position, the rounded Euclidean length of the
// edge leaving position tId of gene `idx` (wrapping at nCities) and stores it
// in distance[tId]. `fitness` and `nPopulation` are part of the shared
// kernel signature but unused here.
// Fix: round-to-nearest adds 0.5 AFTER the square root; the original added it
// inside sqrt(), which biases short edges and disagrees with the rounding used
// by d_computeAllFitnessKernel.
__global__ void d_fitnessOf(int idx, float *cityLoc, int *gene, int nPopulation, int nCities, int *fitness, int *distance) {
// tId: index within a gene
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId >= nCities) return;
int idxA = gene[idx*nCities+tId];
int idxB = gene[idx*nCities+((tId + 1) % nCities)];
// cityLoc is packed as [x0, y0, x1, y1, ...]
float dx = cityLoc[idxA * 2] - cityLoc[idxB * 2];
float dy = cityLoc[idxA * 2 + 1] - cityLoc[idxB * 2 + 1];
distance[tId] = (int)(sqrt(double(dx*dx + dy*dy)) + 0.5);
//printf("[%d, tId:%d-%d] distance %d(%5.2f, %5.2f) %d(%5.2f, %5.2f): (%5.2f, %5.2f) %d\n", idx, tId, (tId + 1) % nCities, idxA, cityLoc[idxA * 2], cityLoc[idxA * 2 + 1], idxB, cityLoc[idxB * 2], cityLoc[idxB * 2 + 1], dx, dy, distance[tId]);
}
// compute the fitness values of all genes
// Host wrapper: computes the total tour length of every gene (one thread per gene).
void d_computeFitnessAll(int blocks, int threads, float *cityLoc, int *gene, int nPopulation, int nCities, int *fitness) {
d_computeAllFitnessKernel << < blocks, threads >> > (cityLoc, gene, nPopulation, nCities, fitness);
}
// Computes fitness[tId] = sum of rounded Euclidean edge lengths around the
// whole tour of gene tId (lower is better). One thread per gene.
__global__ void d_computeAllFitnessKernel(float *cityLoc, int *gene, int nPopulation, int nCities, int *fitness) {
// tId: gene index
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId >= nPopulation) return;
fitness[tId] = 0;
for (int i = 0; i < nCities; i++) {
int idxA = gene[tId*nCities + i];
// wrap to close the tour back to its first city
int idxB = gene[tId*nCities + ((i + 1) % nCities)];
float dx = cityLoc[idxA * 2] - cityLoc[idxB * 2];
float dy = cityLoc[idxA * 2 + 1] - cityLoc[idxB * 2 + 1];
// round-to-nearest integer edge length
fitness[tId] = fitness[tId] + (int)(sqrt(dx*dx + dy*dy) + 0.5);
}
}
// gene copy
// Host wrapper: copies gene `fromIdx` over gene `toIdx` on the device.
void d_copyGene(int blocks, int threads, int toIdx, int fromIdx, int *d_gene, int nCities) {
d_copyGeneKernel << <blocks, threads >> >(toIdx, fromIdx, d_gene, nCities);
}
// Copies gene `fromIdx` into gene slot `toIdx`, one element per thread.
// Threads past the end of the gene do nothing.
__global__ void d_copyGeneKernel(int toIdx, int fromIdx, int *d_gene, int nCities) {
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos < nCities) {
        d_gene[toIdx * nCities + pos] = d_gene[fromIdx * nCities + pos];
    }
}
// gene crossover
// Host wrapper: initializes the crossover bookkeeping arrays for gene `i`.
void d_initAuxMem(int blocks, int threads, int nCities, int i, int *d_gene, int *d_orderOfCity, int *d_fJump, int *d_bJump) {
d_initAuxMemKernel << < blocks, threads >> > (nCities, i, d_gene, d_orderOfCity, d_fJump, d_bJump);
}
// Per-position init for gene i's crossover bookkeeping (one thread per city):
//  - fJump/bJump: forward/backward skip distances used to hop over cities
//    already consumed during crossover. The end positions start at 2 —
//    presumably so traversal skips a sentinel slot; TODO confirm.
//  - orderOfCity: inverse permutation (city -> its position in the tour).
__global__ void d_initAuxMemKernel(int nCities, int i, int *d_gene, int *d_orderOfCity, int *d_fJump, int *d_bJump) {
// tId: index within a gene
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId >= nCities) return;
d_fJump[i*nCities + tId] = (tId == nCities-1)? 2:1;
d_bJump[i*nCities + tId] = (tId == 1)? 2: 1;
int city = d_gene[i*nCities + tId];
d_orderOfCity[i*nCities + city] = tId;
}
// Host wrapper: runs one crossover step (tour position i) for all groups.
void d_crossover(int blocks, dim3 threads, int i, int nPopulation, int nGroups, int nCities, int *d_gene, float *d_cityLoc, int *d_orderOfCity, int *d_fJump, int *d_bJump) {
d_crossoverKernel << < blocks, threads >> > (i, nPopulation, nGroups, nCities, d_gene, d_cityLoc, d_orderOfCity, d_fJump, d_bJump);
}
// One step of a greedy (SCX-style) crossover: for each parent pair (p1, p2)
// in each group, extends the offspring tour at position i with the unvisited
// neighbor of the offspring's last city that is closest (by squared distance),
// choosing among the forward/backward neighbors from both parents. fJump/bJump
// are skip lists over each parent's tour that let the scan hop over cities
// already placed in the offspring; orderOfCity maps city -> tour position.
// threadIdx.y selects the group; x-threads index parent pairs within a group.
__global__ void d_crossoverKernel(int i, int nPopulation, int nGroups, int nCities, int *d_gene, float *d_cityLoc, int *d_orderOfCity, int *d_fJump, int *d_bJump) {
// tId: crossover index
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int tGroup = threadIdx.y;
int nMemberOfAGroup = nPopulation / nGroups;
int start = tGroup*nMemberOfAGroup;
int end = (tGroup == nGroups - 1) ? nPopulation - 1 : start + nMemberOfAGroup;
nMemberOfAGroup = end - start;
int nCrossover = nMemberOfAGroup / 4;
if (tId >= nCrossover ) return;
// parents are consecutive slots; the offspring lives after the parent pairs
int p1 = start + tId * 2;
int p2 = p1 + 1;
int child = start + nCrossover*2 + tId;
int lastCity = d_gene[child*nCities + i - 1];
// find candidates from parent 1
int idx1 = d_orderOfCity[p1*nCities + lastCity];
int fnode = (idx1 + d_fJump[p1*nCities + idx1]) % nCities;
int bnode = idx1 - d_bJump[p1*nCities + idx1];
while (bnode < 0) bnode += nCities;
int cand1 = d_gene[p1*nCities + fnode];
int cand2 = d_gene[p1*nCities + bnode];
// find candidates from parent 2
int idx2 = d_orderOfCity[p2*nCities + lastCity];
fnode = (idx2 + d_fJump[p2*nCities + idx2]) % nCities;
bnode = idx2 - d_bJump[p2*nCities + idx2];
while (bnode < 0) bnode += nCities;
int cand3 = d_gene[p2*nCities + fnode];
int cand4 = d_gene[p2*nCities + bnode];
// select best candidate (squared distances suffice for comparison)
float dx = d_cityLoc[cand1 * 2] - d_cityLoc[lastCity * 2];
float dy = d_cityLoc[cand1 * 2 + 1] - d_cityLoc[lastCity * 2 + 1];
int dist1 = (int)(dx*dx + dy*dy);
dx = d_cityLoc[cand2 * 2] - d_cityLoc[lastCity * 2];
dy = d_cityLoc[cand2 * 2 + 1] - d_cityLoc[lastCity * 2 + 1];
int dist2 = (int)(dx*dx + dy*dy);
dx = d_cityLoc[cand3 * 2] - d_cityLoc[lastCity * 2];
dy = d_cityLoc[cand3 * 2 + 1] - d_cityLoc[lastCity * 2 + 1];
int dist3 = (int)(dx*dx + dy*dy);
dx = d_cityLoc[cand4 * 2] - d_cityLoc[lastCity * 2];
dy = d_cityLoc[cand4 * 2 + 1] - d_cityLoc[lastCity * 2 + 1];
int dist4 = (int)(dx*dx + dy*dy);
int best = dist1;
int bestCity = cand1;
if (dist2 < best) { best = dist2; bestCity = cand2; }
if (dist3 < best) { best = dist3; bestCity = cand3; }
if (dist4 < best) { best = dist4; bestCity = cand4; }
// set the bestCity as the next element of the offspring
//printf("bestCity = %d\n", bestCity);
d_gene[child*nCities + i] = bestCity;
// invalidate nextCity: splice bestCity out of both parents' skip lists so
// later steps hop over it (the neighbors' jump widths absorb its own)
idx1 = d_orderOfCity[p1*nCities + bestCity];
int fChange = idx1 - d_bJump[p1*nCities + idx1];
while (fChange < 0) fChange += nCities;
int bChange = (idx1 + d_fJump[p1*nCities + idx1]) % nCities;
d_fJump[p1*nCities + fChange] += d_fJump[p1*nCities + idx1];
d_bJump[p1*nCities + bChange] += d_bJump[p1*nCities + idx1];
idx2 = d_orderOfCity[p2*nCities + bestCity];
fChange = idx2 - d_bJump[p2*nCities + idx2];
while (fChange < 0) fChange += nCities;
bChange = (idx2 + d_fJump[p2*nCities + idx2]) % nCities;
d_fJump[p2*nCities + fChange] += d_fJump[p2*nCities + idx2];
d_bJump[p2*nCities + bChange] += d_bJump[p2*nCities + idx2];
}
// Host wrapper: runs one ABC-SCX crossover step (tour position i); `mode`
// (0 or 1) selects which of the two offspring variants is produced.
void d_crossoverABCSCX(int blocks, dim3 threads, int i, int mode, int nPopulation, int nGroups, int nCities, int *d_gene, float *d_cityLoc, int *d_orderOfCity, int *d_fJump, int *d_bJump) {
d_crossoverABCSCXKernel << < blocks, threads >> > (i, mode, nPopulation, nGroups, nCities, d_gene, d_cityLoc, d_orderOfCity, d_fJump, d_bJump);
}
// ABC-SCX crossover step: like d_crossoverKernel, but the contributing
// parent alternates by tour segment — inside (pointA, pointB) one parent is
// used, outside the other; `mode` (0/1) swaps the roles, producing the second
// offspring variant in slot start + nCrossover*(2+mode) + tId.
__global__ void d_crossoverABCSCXKernel(int i, int mode, int nPopulation, int nGroups, int nCities, int *d_gene, float *d_cityLoc, int *d_orderOfCity, int *d_fJump, int *d_bJump) {
// tId: crossover index
int tId = threadIdx.x + blockIdx.x * blockDim.x;
int tGroup = threadIdx.y;
int nMemberOfAGroup = nPopulation / nGroups;
int start = tGroup*nMemberOfAGroup;
int end = (tGroup == nGroups - 1) ? nPopulation - 1 : start + nMemberOfAGroup;
nMemberOfAGroup = end - start;
int nCrossover = nMemberOfAGroup / 4;
if (tId >= nCrossover) return;
// segment boundaries splitting the tour in half
int pointA = tId;
pointA = pointA % (nCities / 2);
int pointB = pointA + nCities / 2;
int p1 = start + tId * 2;
int p2 = p1 + 1;
int p;
if (mode < 0 || mode>1) return;
// offspring 1
if (i > pointA && i < pointB) p = mode?p1:p2; else p = mode?p2:p1;
int child = start + nCrossover * (2+mode) + tId;
int lastCity = d_gene[child*nCities + i - 1];
// find candidates from parent
int idx = d_orderOfCity[p*nCities + lastCity];
int fnode = (idx + d_fJump[p*nCities + idx]) % nCities;
int bnode = idx - d_bJump[p*nCities + idx];
while (bnode < 0) bnode += nCities;
int cand1 = d_gene[p*nCities + fnode];
int cand2 = d_gene[p*nCities + bnode];
// select better candidate (squared distances suffice for comparison)
float dx = d_cityLoc[cand1 * 2] - d_cityLoc[lastCity * 2];
float dy = d_cityLoc[cand1 * 2 + 1] - d_cityLoc[lastCity * 2 + 1];
int dist1 = (int) (dx*dx + dy*dy);
dx = d_cityLoc[cand2 * 2] - d_cityLoc[lastCity * 2];
dy = d_cityLoc[cand2 * 2 + 1] - d_cityLoc[lastCity * 2 + 1];
int dist2 = (int) (dx*dx + dy*dy);
int best = dist1;
int bestCity = cand1;
if (dist2 < best) { best = dist2; bestCity = cand2; }
// set the bestCity as the next element of the offspring
d_gene[child*nCities + i] = bestCity;
// invalidate nextCity: splice bestCity out of BOTH parents' skip lists
int idx1 = d_orderOfCity[p1*nCities + bestCity];
int fChange = idx1 - d_bJump[p1*nCities + idx1];
while (fChange < 0) fChange += nCities;
int bChange = (idx1 + d_fJump[p1*nCities + idx1]) % nCities;
d_fJump[p1*nCities + fChange] += d_fJump[p1*nCities + idx1];
d_bJump[p1*nCities + bChange] += d_bJump[p1*nCities + idx1];
int idx2 = d_orderOfCity[p2*nCities + bestCity];
fChange = idx2 - d_bJump[p2*nCities + idx2];
while (fChange < 0) fChange += nCities;
bChange = (idx2 + d_fJump[p2*nCities + idx2]) % nCities;
d_fJump[p2*nCities + fChange] += d_fJump[p2*nCities + idx2];
d_bJump[p2*nCities + bChange] += d_bJump[p2*nCities + idx2];
}
// reverse gene(gene_idx) : reverse the gene substring from idxA to idxB
// Host wrapper: reverses the sub-tour [idxA, idxB] of gene `gene_idx`.
void d_reverseSubGene(int blocks, int threads, int gene_idx, int idxA, int idxB, int *d_gene, int nCities) {
d_reverseSubGeneKernel << < blocks, threads >> > (gene_idx, idxA, idxB, d_gene, nCities);
}
// Reverses the sub-tour [idxA, idxB] of gene `gene_idx` in place.
// Each thread swaps one mirrored pair; threads past the midpoint are idle.
__global__ void d_reverseSubGeneKernel(int gene_idx, int idxA, int idxB, int *d_gene, int nCities) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int pairCount = (idxB - idxA + 1) / 2;
    if (tid < pairCount) {
        int *tour = d_gene + gene_idx * nCities;
        const int lo = idxA + tid;
        const int hi = idxB - tid;
        const int tmp = tour[lo];
        tour[lo] = tour[hi];
        tour[hi] = tmp;
    }
}
// gene fix : move a city
// Host wrapper: builds into aGene a copy of gene `idx` with city iCity moved
// from position iForMaxGain to position jForMaxGain.
void d_createACityShiftedGene(int blocks, int threads, int nCities, int iCity, int iForMaxGain, int jForMaxGain, int *d_gene, int idx, int *aGene) {
d_createACityShiftedGeneKernel << <blocks, threads >> >(nCities, iCity, iForMaxGain, jForMaxGain, d_gene, idx, aGene);
}
// Writes into aGene a version of gene `idx` where city iCity is relocated to
// position jForMaxGain: positions in [iForMaxGain, jForMaxGain) shift left by
// one, jForMaxGain receives iCity, everything else is copied verbatim.
// Assumes iForMaxGain <= jForMaxGain — TODO confirm callers guarantee this.
__global__ void d_createACityShiftedGeneKernel(int nCities, int iCity, int iForMaxGain, int jForMaxGain, int *d_gene, int idx, int *aGene) {
// tId: index within a gene
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId >= nCities) return;
if (tId >= iForMaxGain && tId < jForMaxGain) {
aGene[tId] = d_gene[idx*nCities + tId + 1];
}
else if (tId == jForMaxGain) {
aGene[tId] = iCity;
}
else aGene[tId] = d_gene[idx*nCities + tId];
}
// Host wrapper: copies the scratch tour aGene back into gene slot `idx`.
void d_copyBack(int blocks, int threads, int nCities, int *d_gene, int idx, int *aGene) {
d_copyBackKernel <<<blocks, threads>>>(nCities, d_gene, idx, aGene);
}
// Copies the scratch tour aGene into gene slot `idx`, one element per thread.
__global__ void d_copyBackKernel(int nCities, int *d_gene, int idx, int *aGene) {
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos < nCities) {
        d_gene[idx * nCities + pos] = aGene[pos];
    }
}
|
21,840 | #include "includes.h"
// Maps each |vec[i]| to a bin index: values at or below intercept*1e-6 go to
// the overflow bin `num_bins`; otherwise the bin is the linear ramp
// slope * (intercept - |v|), truncated to int. One thread per element.
// NOTE(review): bin_new_val can go negative when |v| > intercept, and nothing
// clamps it to [0, num_bins] — confirm callers guarantee the input range.
__global__ void make_bins(float *vec, int *bin, const int num_bins, const int n, const float slope, const float intercept)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( xIndex < n ){
int bin_new_val;
float temp = abs(vec[xIndex]);
if ( temp > (intercept *.000001) ){
// implicit float -> int truncation
bin_new_val=slope * (intercept - temp);
}
else bin_new_val = num_bins;
bin[xIndex]=bin_new_val;
}
} |
21,841 | #include <stdio.h>
#include <sys/time.h>
// Returns wall-clock time in seconds with microsecond resolution
// (gettimeofday); used for coarse host-side timing.
double CpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Sequential sum of the first `size` ints — CPU reference result for the
// GPU reduction kernels. Does not modify `data`.
int CpuNormalCal(int* data, const int size) {
    int total = 0;
    for (int idx = 0; idx < size; ++idx) {
        total += data[idx];
    }
    return total;
}
// Pairwise in-place reduction: repeatedly folds the upper half of the array
// onto the lower half until one element remains, mirroring the GPU kernels'
// access pattern. NOTE: mutates `data`; with an odd length > 1 the last
// element of that level is dropped (same as the original recursion).
int CpuRecusiveReduce(int* data, const int size) {
    int remaining = size;
    while (remaining > 1) {
        const int half = remaining / 2;
        for (int i = 0; i < half; ++i) {
            data[i] += data[i + half];
        }
        remaining = half;
    }
    return data[0];
}
// Block-level sum reduction, neighbored-pair version: each block reduces its
// blockDim.x-element slice of g_idata IN PLACE and writes the partial sum to
// g_odata[blockIdx.x]. The `tid % (2*stride)` selection leaves most threads
// of a warp idle each round (the V2 kernel below avoids that).
// Assumes n is a multiple of blockDim.x for full trailing blocks.
__global__ void GpuReduceNeighbored(int* g_idata, int* g_odata, unsigned int n) {
// thread id in courrent block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
// subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0) {
indata[tid] += indata[tid + stride];
}
// barrier is outside the divergent branch, so all threads reach it
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
// Neighbored-pair reduction, v2: same result as GpuReduceNeighbored, but the
// active threads are remapped to a contiguous index range
// (index = 2*stride*tid), keeping whole warps busy instead of scattering the
// active lanes. Reduces g_idata in place; partial sum -> g_odata[blockIdx.x].
__global__ void GpuReduceNeighboredV2(int* g_idata, int* g_odata, unsigned int n) {
// thread id in courrent block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
// subarray in current blocks
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = 1; stride < blockDim.x; stride *= 2) {
// convert tid into local array index (in block)
int index = 2 * stride * tid;
if (index < blockDim.x) {
indata[index] += indata[index + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
// Interleaved-pair reduction: stride starts at half the block and halves each
// round, so threads [0, stride) add the mirrored upper half onto the lower
// half. Active threads stay contiguous and global accesses stay coalesced.
// Reduces g_idata in place; partial sum -> g_odata[blockIdx.x].
__global__ void GpuReduceInterleaved(int* g_idata, int* g_odata, unsigned int n) {
// thread id in courrent block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
// subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
indata[tid] += indata[tid + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
// Benchmark driver: runs the three GPU reduction kernels and two CPU
// reference reductions over 2^24 random ints, printing sums and timings.
// NOTE(review): the "kernal elaps" values are taken right after an ASYNC
// kernel launch with no synchronization, so they measure launch overhead,
// not kernel execution time (only elaps_all_3 spans a blocking memcpy).
int main(int argc, char** argv) {
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("device[%d]: %s\n", dev, deviceProp.name);
// block size optionally overridden by argv[1]
int block_size = 512;
if (argc > 1) {
block_size = atoi(argv[1]);
}
int size = 1 << 24;
printf("array size: %d\n", size);
dim3 block(block_size, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("kernal size: grid(%d, %d), block(%d, %d)\n", grid.x, grid.y, block.x, block.y);
// alloc mem
size_t bytes = size * sizeof(int);
int* h_idata = (int*)malloc(bytes);
int* h_odata = (int*)malloc(grid.x * sizeof(int));
// scratch copy for the (mutating) CPU reductions
int* tmp = (int*)malloc(bytes);
// initialize array with small values so the 2^24-element sum fits in int
for (int i = 0; i < size; ++i) {
h_idata[i] = (int) (rand() & 0xFF);
}
// alloc hbm
int* d_idata = NULL;
int* d_odata = NULL;
cudaMalloc((void**) &d_idata, bytes);
cudaMalloc((void**) &d_odata, grid.x * sizeof(int));
int gpu_sum = 0;
// ------ kernal 1 ------
// copy input data from h to d (re-copied before each kernel because the
// kernels reduce d_idata in place)
cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
// cuda kernal cal
double t1 = CpuSecond();
GpuReduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
double elaps1 = CpuSecond() - t1;
// copy output data from d to h (blocking memcpy also synchronizes)
cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
// cpu cal: final reduction of the per-block partial sums
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
printf("GpuReduceNeighbored result: %d, kernal elaps: %f\n", gpu_sum, elaps1);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
// ------ kernal 2 ------
// copy input data from h to d
cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
// cuda kernal cal
double t2 = CpuSecond();
GpuReduceNeighboredV2<<<grid, block>>>(d_idata, d_odata, size);
double elaps2 = CpuSecond() - t2;
// copy output data from d to h
cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
printf("GpuReduceNeighboredV2 result: %d, kernal elaps: %f\n", gpu_sum, elaps2);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
// ------ kernal 3 ------
// copy input data from h to d
cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
// cuda kernal cal
double t3 = CpuSecond();
GpuReduceInterleaved<<<grid, block>>>(d_idata, d_odata, size);
double elaps3 = CpuSecond() - t3;
// copy output data from d to h
cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
double elaps_all_3 = CpuSecond() - t3;
printf("GpuReduceInterleaved result: %d, kernal elaps: %f, all elaps: %f\n", gpu_sum, elaps3, elaps_all_3);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
memcpy(tmp, h_idata, bytes);
// ------ cpu 1 ------ (non-mutating scan over tmp)
double t4 = CpuSecond();
int cpu_sum1 = CpuNormalCal(tmp, size);
double elaps_all_4 = CpuSecond() - t4;
// ------ cpu 2 ------ (mutates tmp; runs after cpu 1 on the same copy)
double t5 = CpuSecond();
int cpu_sum2 = CpuRecusiveReduce(tmp, size);
double elaps_all_5 = CpuSecond() - t5;
printf("cpu normal result: %d, elaps_all: %f\n", cpu_sum1, elaps_all_4);
printf("cpu recusize result: %d, elaps_all: %f\n", cpu_sum2, elaps_all_5);
// free host mem
// NOTE(review): `tmp` is never freed (leak at exit).
free(h_idata);
free(h_odata);
// free gpu hbm
cudaFree(d_idata);
cudaFree(d_odata);
// reset device
cudaDeviceReset();
}
/*
device[0]: Tesla V100-SXM2-32GB
array size: 16777216
kernal size: grid(32768, 1), block(512, 1)
GpuReduceNeighbored result: 2139353471, kernal elaps: 0.000035
GpuReduceNeighboredV2 result: 2139353471, kernal elaps: 0.000017
GpuReduceInterleaved result: 2139353471, kernal elaps: 0.000011, all elaps: 0.000567
cpu normal result: 2139353471, elaps_all: 0.043164
cpu recusize result: 2139353471, elaps_all: 0.042999
*/ |
21,842 | //pass
//--gridDim=[6,10] --blockDim=[13,13]
__constant__ int kernelTemplate[25] = {
0, 1, 2, 3, 4,
29, 30, 31, 32, 33,
58, 59, 60, 61, 62,
87, 88, 89, 90, 91,
116,117,118,119,120 };
// First conv-layer stub for a 13x13-per-feature-map network: with the
// convolution loop commented out, each output neuron receives only its bias
// (the first of the 26 weights per block). Launch: gridDim.x selects the
// feature map, gridDim.y the image, block is [13,13] output pixels.
__global__ void executeFirstLayer(float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU)
{
int blockID=blockIdx.x;
int pixelX=threadIdx.x;
int pixelY=threadIdx.y;
// 26 weights per feature map: 1 bias + 25 kernel taps
int weightBegin=blockID*26;
// stride-2 window over the 29x29 input (window vars unused while the loop
// below is disabled)
int windowX=pixelX*2;
int windowY=pixelY*2;
float result=0;
// bias term
result+=Layer1_Weights_GPU[weightBegin];
++weightBegin;
//for(int i=0;i<25;++i)
//{
// result+=Layer1_Neurons_GPU[(windowY*29+windowX+kernelTemplate[i])+(29*29*blockIdx.y)]*Layer1_Weights_GPU[weightBegin+i];
//}
//result=(1.7159*tanhf(0.66666667*result));
Layer2_Neurons_GPU[(13*13*blockID+pixelY*13+pixelX)+(13*13*6*blockIdx.y)]=result;
}
|
21,843 | // execute by typing nvcc que1.cu
// ./a.out
#include <stdio.h>
#include <cuda.h>
#define N 32
// Writes each element's own global thread index into arr.
// No bounds guard: launch exactly one thread per element.
__global__ void initArray(int *arr)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    arr[gid] = gid;
}
// Per-element worker for squaring an N x N matrix (one thread per output
// cell), with debug prints for each partial product.
// Fix: the second printf's format string was "ix = %d; jx = ;\n" — it passed
// jx but had no conversion for it, so jx was never printed.
// NOTE(review): `result[index] += kk;` accumulates the LOOP COUNTER, not the
// product `r` computed above — looks like a leftover debug stub. Left as-is
// because callers may depend on the current output; confirm intent.
__global__ void square (int *matrix, int *result, int matrixsize) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int ii = id / matrixsize;              // row of this output cell
    int jj = id % matrixsize;              // column of this output cell
    int index = ii * matrixsize + jj;      // flat output index (== id)
    for (int kk = 0; kk < matrixsize; ++kk) {
        int ix = ii * matrixsize + kk;     // walk row ii
        int jx = kk * matrixsize + jj;     // walk column jj
        int r = matrix[ix] * matrix[jx];
        printf("Mresult_arr[%d] = %d\n", index, r);
        printf("ix = %d; jx = %d;\n", ix, jx);
        result[index] += kk;
    }
}
// Driver: fills a device array with indices, runs the `square` kernel over
// it, and copies the result back.
// Fixes: d_result_arr was accumulated into (`+=` in the kernel) while
// uninitialized — it is now zeroed first; result_arr and d_result_arr were
// never released — both are now freed.
int main()
{
    int *arr;
    int *result_arr;
    int *d_arr;
    int *d_result_arr;
    int raw_size = (N * 2);
    int size = raw_size * sizeof(int);
    arr = (int *)malloc(size);
    result_arr = (int *)malloc(size);
    cudaMalloc((void **)&d_arr, size);
    cudaMalloc((void **)&d_result_arr, size);
    // Kernel does `result[index] += ...`, so start from a known zero state.
    cudaMemset(d_result_arr, 0, size);
    initArray<<<raw_size,1>>>(d_arr);
    square<<<raw_size,1>>>(d_arr, d_result_arr, raw_size);
    // Blocking copy also synchronizes with the kernels above.
    cudaMemcpy(result_arr, d_result_arr, size, cudaMemcpyDeviceToHost);
    free(arr);
    free(result_arr);
    cudaFree(d_arr);
    cudaFree(d_result_arr);
    return 0;
}
|
21,844 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Converts a flat row-major index into (row, col) for a rows x cols board.
// Fix: row must be idx / cols (the original divided by `rows`, which only
// gives the right answer when the board is square and disagrees with
// coordsToIdx, which packs as row * cols + col).
__device__ void idxToCoords(const int idx, int *row, int *col, int rows, int cols)
{
	// `rows` is kept in the signature for symmetry/compatibility; unused here.
	*row = idx / cols;
	*col = idx % cols;
	return;
}
// Packs (row, col) into the flat row-major index row * cols + col.
// `rows` is accepted for signature symmetry with idxToCoords but is unused.
__device__ void coordsToIdx(const int row, const int col, int *idx, int rows, int cols)
{
	const int flat = row * cols + col;
	*idx = flat;
}
// One Conway's-Game-of-Life step for a single cell (one thread per cell) on a
// toroidal rows x cols board: counts the 8 wrap-around neighbors in oldState
// and writes the cell's next value (0 dead / 1 alive) to newState.
// Fix/cleanup: the original repeated the neighbor check eight times with
// copy-pasted wrap logic (and left several unused locals); this collapses it
// into a single offset loop with identical single-step wrap semantics.
__global__ void conwayThread(char *oldState, char *newState, int rows, int cols)
{
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx >= rows * cols)
		return;
	int rowIdx;
	int colIdx;
	idxToCoords(idx, &rowIdx, &colIdx, rows, cols);
	int numLiveNeighbors = 0;
	// Visit the 8 neighbors via row/col offsets in {-1, 0, 1}, skipping (0,0).
	for (int dRow = -1; dRow <= 1; dRow++)
	{
		for (int dCol = -1; dCol <= 1; dCol++)
		{
			if (dRow == 0 && dCol == 0)
				continue;
			// Single-step toroidal wrap (offsets are at most 1).
			int nRow = rowIdx + dRow;
			if (nRow < 0)
				nRow = rows - 1;
			else if (nRow >= rows)
				nRow = 0;
			int nCol = colIdx + dCol;
			if (nCol < 0)
				nCol = cols - 1;
			else if (nCol >= cols)
				nCol = 0;
			int nIdx;
			coordsToIdx(nRow, nCol, &nIdx, rows, cols);
			if (oldState[nIdx] == 1)
				numLiveNeighbors++;
		}
	}
	// Standard rules: a live cell survives with 2 or 3 neighbors; a dead cell
	// is born with exactly 3.
	if (oldState[idx] == 1)
	{
		if (numLiveNeighbors < 2 || numLiveNeighbors > 3)
		{
			newState[idx] = 0;
		}
		else
		{
			newState[idx] = 1;
		}
	}
	else
	{
		if (numLiveNeighbors == 3)
		{
			newState[idx] = 1;
		}
		else
		{
			newState[idx] = 0;
		}
	}
}
// Renders the board to stdout, one row per line: '-' for a dead cell (0),
// '0' for a live cell. `board` is read row-major.
void printBoard(char *board, int rows, int cols)
{
	const char *cell = board;
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			printf(*cell == 0 ? "-" : "0");
			cell++;
		}
		printf("\n");
	}
}
// Driver: seeds a random 256x256 board, runs 100 Game-of-Life generations on
// the GPU (double-buffered via a device-to-device copy each step), and prints
// the initial and final boards.
int main()
{
	//const int arraySize = 5;
	//const int a[arraySize] = { 1, 2, 3, 4, 5 };
	//const int b[arraySize] = { 10, 20, 30, 40, 50 };
	//int c[arraySize] = { 0 };
	const int iterations = 100;
	const int rows = 256;
	const int cols = 256;
	const int boardSize = rows * cols;
	char prevState[boardSize];
	char nextState[boardSize];
	char *gpu_prevState = 0;
	char *gpu_nextState = 0;
	// fixed seed -> reproducible initial board of 0/1 cells
	srand(0);
	for (int i = 0; i < boardSize; i++)
		prevState[i] = rand() % 2;
	printf("Beginning state:\n");
	printBoard(prevState, rows, cols);
	cudaError_t errors;
	// NOTE(review): the cudaSetDevice status is overwritten by the next call
	// before it is ever checked — the check below only sees
	// cudaGetDeviceProperties' result.
	errors = cudaSetDevice(0);
	cudaDeviceProp props;
	errors = cudaGetDeviceProperties(&props, 0);
	int nBlocks;
	//printf("Max threads: %d\n", props.maxThreadsPerBlock);
	// NOTE(review): `temp` is computed but never used.
	int temp = (boardSize + (props.maxThreadsPerBlock - (boardSize % props.maxThreadsPerBlock)));
	//printf("Temp: %d\n", temp);
	// ceil-divide boardSize by maxThreadsPerBlock
	if ((boardSize % props.maxThreadsPerBlock) != 0)
		nBlocks = (boardSize + (props.maxThreadsPerBlock - (boardSize % props.maxThreadsPerBlock))) / props.maxThreadsPerBlock;
	else
		nBlocks = boardSize / props.maxThreadsPerBlock;
	//printf("Blocks: %d\n", nBlocks);
	if (errors != cudaSuccess)
	{
		printf("Error setting device\n");
		exit(0);
	}
	errors = cudaMalloc((void **)&gpu_prevState, boardSize * sizeof(char));
	if (errors != cudaSuccess)
	{
		printf("Error allocating previous state\n");
		exit(0);
	}
	errors = cudaMalloc((void **)&gpu_nextState, boardSize * sizeof(char));
	if (errors != cudaSuccess)
	{
		printf("Error allocating next state\n");
		exit(0);
	}
	errors = cudaMemcpy(gpu_prevState, prevState, boardSize * sizeof(char), cudaMemcpyHostToDevice);
	if (errors != cudaSuccess)
	{
		printf("Error copying previous state\n");
		exit(0);
	}
	// NOTE(review): nextState is uninitialized here; harmless because the
	// kernel overwrites every cell of gpu_nextState, but the copy is wasted.
	errors = cudaMemcpy(gpu_nextState, nextState, boardSize * sizeof(char), cudaMemcpyHostToDevice);
	if (errors != cudaSuccess)
	{
		printf("Error copying next state\n");
		exit(0);
	}
	for (int i = 0; i < iterations; i++)
	{
		//printf("On iteration %d\n", i);
		// 4x the blocks at 1/4 the threads still covers boardSize threads
		conwayThread <<<nBlocks * 4, props.maxThreadsPerBlock / 4>>>(gpu_prevState, gpu_nextState, rows, cols);
		errors = cudaGetLastError();
		if (errors != cudaSuccess)
		{
			printf("Error launching kernel\n");
			printf("%s\n", cudaGetErrorString(errors));
			exit(0);
		}
		errors = cudaDeviceSynchronize();
		if (errors != cudaSuccess)
		{
			printf("Error synchronizing device\n");
			exit(0);
		}
		// Copy through host
		//cudaMemcpy(nextState, gpu_nextState, boardSize * sizeof(char), cudaMemcpyDeviceToHost);
		//cudaMemcpy(gpu_prevState, nextState, boardSize * sizeof(char), cudaMemcpyHostToDevice);
		// Copy through device
		cudaMemcpy(gpu_prevState, gpu_nextState, boardSize * sizeof(char), cudaMemcpyDeviceToDevice);
	}
	cudaMemcpy(nextState, gpu_nextState, boardSize * sizeof(char), cudaMemcpyDeviceToHost);
	printf("Final state\n");
	printBoard(nextState, rows, cols);
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	errors = cudaDeviceReset();
	if (errors != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}
	return 0;
}
|
21,845 | /* ECGR 6090 Heterogeneous Computing Homework0
Problem 2- 1D stencil using GPU
Written by Bhavin Thakar - 801151488
*/
// To execute the program type: ./1DstencilGPU
#include<stdio.h>
#include <sys/time.h>
#include<stdlib.h>
struct timeval stop, start,start1,stop1,start2, stop2;
#define R 16 // Define Radius
#define B 128 // Define Thread block size
#define N 10000 // Define number of elements in array
// Kernel Function
// 1D stencil of radius R: out[i] = sum of in[i .. i+2R].  The input carries
// R halo cells on each side, so output index i maps to input index i + R.
// BUGFIX: added a bounds guard — the original kernel let threads past the
// last output element read and write out of bounds whenever
// gridDim.x * blockDim.x exceeded N.
__global__ void stencil1d(int *in, int *out){
    int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + R;
    if (gindex - R >= N)
        return;
    int result = 0;
    for (int offset = -R; offset <= R; offset++){
        result += in[gindex + offset];
    }
    out[gindex - R] = result;
}
// Function to generate random numbers and adding it to an integer array
// Fill a[0..n-1] with pseudo-random integers in [0, 100).
// BUGFIX: the original loop ran i <= n+1 and wrote TWO elements past the
// requested count; it now fills exactly n elements.
void random(int *a, int n ){
    for (int i = 0; i < n; ++i)
        a[i] = rand() % 100; // generating integer values from 0 to 100
}
// Driver: build a padded random input, run the 1D stencil on the GPU, and
// report kernel and transfer times.
int main(void){
    int n;
    int *c_in, *c_out;           // host input (with halo) and output
    int size = N * sizeof(int);  // bytes for the N output elements
    n = N + 2 * R;               // input length including halo cells
    // BUGFIX: the original multiplied an element count by `size` (already a
    // byte count), over-allocating every buffer by a factor of N.
    c_in = (int*)malloc(n * sizeof(int));
    c_out = (int*)malloc(size);
    gettimeofday(&start1, NULL);
    random(c_in, n); // Calling random function
    int *d_in, *d_out; // Declaring integer array for GPU
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, size);
    // Copying inputs from CPU to GPU
    cudaMemcpy(d_in, c_in, n * sizeof(int), cudaMemcpyHostToDevice);
    gettimeofday(&stop1, NULL);
    gettimeofday(&start, NULL);
    // BUGFIX: the grid size was (N/B-1)/B, which launches far too few blocks
    // to cover N elements; use the standard ceiling division.
    stencil1d<<<(N + B - 1) / B, B>>>(d_in, d_out);
    // BUGFIX: synchronize BEFORE reading the clock; kernel launches are
    // asynchronous, so the original only timed the launch itself.
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);
    printf("Execution time of kernel: %lu us\n", (stop.tv_sec - start.tv_sec) * 1000000 + stop.tv_usec - start.tv_usec);
    gettimeofday(&start2, NULL);
    // BUGFIX: copy back exactly the N output elements (was n*size bytes,
    // much larger than the device buffer).
    cudaMemcpy(c_out, d_out, size, cudaMemcpyDeviceToHost);
    gettimeofday(&stop2, NULL);
    // Overhead of the host<->device transfers (both directions).
    unsigned int i;
    i = (stop1.tv_sec - start1.tv_sec) * 1000000 + (stop1.tv_usec - start1.tv_usec);
    i = i + ((stop2.tv_sec - start2.tv_sec) * 1000000 + (stop2.tv_usec - start2.tv_usec));
    printf("Execution time for data transfer: %lu us\n", (unsigned long)i);
    // Free resources
    free(c_in);
    free(c_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
21,846 | #include <stdio.h>
__global__
// Toy kernel: negates *a into *b, then immediately overwrites *b with 1.05.
// NOTE(review): the first store is dead — *b always ends up 1.05; the `n`
// parameter is unused. Looks like experimentation code; confirm intent.
void revert(int n, float* a, float *b) {
*b = - (*a);
*b = 1.05;
}
__global__
// Toy kernel: unconditionally stores the constant 1.5 into *b.
// NOTE(review): `n` and `a` are unused; likely a placeholder for a real
// reduction — confirm before relying on it.
void getmax(int n, float* a, float* b) {
*b = 1.5;
}
// Minimal round trip: copy a scalar to the device, run getmax (which writes
// a constant into b_d), copy the result back and print both values.
int main() {
    float* a, *b, *a_d, *b_d;
    a = (float*)malloc(sizeof(float));
    b = (float*)malloc(sizeof(float));
    *a = 5.2;
    printf("a:%.2f\n", *a);
    *b = 0;
    cudaMalloc((void**)&a_d, sizeof(float));
    cudaMalloc((void**)&b_d, sizeof(float));
    cudaMemcpy(a_d, a, sizeof(float), cudaMemcpyHostToDevice);
    int N = 1;
    getmax<<<(N+255)/256, 256>>>(N, a_d, b_d);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(b, b_d, sizeof(float), cudaMemcpyDeviceToHost);
    printf("a:%.2f, b:%.2f\n", *a, *b);
    // BUGFIX: release host and device allocations (all four leaked before).
    cudaFree(a_d);
    cudaFree(b_d);
    free(a);
    free(b);
    return 0;
}
// __global__
// void saxpy(int n, float a, float *x, float *y)
// {
// int i = blockIdx.x*blockDim.x + threadIdx.x;
// if (i < n) y[i] = a*x[i] + y[i];
// }
// int main(void)
// {
// int N = 1<<20;
// float *x, *y, *d_x, *d_y;
// x = (float*)malloc(N*sizeof(float));
// y = (float*)malloc(N*sizeof(float));
// cudaMalloc(&d_x, N*sizeof(float));
// cudaMalloc(&d_y, N*sizeof(float));
// for (int i = 0; i < N; i++) {
// x[i] = 1.0f;
// y[i] = 2.0f;
// }
// cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// // Perform SAXPY on 1M elements
// saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
// cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
// float maxError = 0.0f;
// for (int i = 0; i < N; i++)
// maxError = max(maxError, abs(y[i]-4.0f));
// printf("Max error: %f\n", maxError);
// cudaFree(d_x);
// cudaFree(d_y);
// free(x);
// free(y);
// } |
21,847 | #include "includes.h"
// Clamp an integer to the valid 8-bit color range [0, 255].
__device__ unsigned char clip_rgb_gpu(int x)
{
    int clamped = (x < 0) ? 0 : ((x > 255) ? 255 : x);
    return (unsigned char)clamped;
}
// Per-pixel YCbCr -> RGB conversion over `size` pixels; one thread per pixel.
// Inputs and outputs are separate planar channel arrays.
__global__ void yuv2rgb_gpu_son(unsigned char * d_y , unsigned char * d_u ,unsigned char * d_v , unsigned char * d_r, unsigned char * d_g, unsigned char * d_b, int size)
{
    int pix = threadIdx.x + blockDim.x * blockIdx.x;
    if (pix >= size) return;
    // Center the chroma channels around zero before applying the matrix.
    int luma = (int)d_y[pix];
    int chromaB = ((int)d_u[pix]) - 128;
    int chromaR = ((int)d_v[pix]) - 128;
    // Fixed conversion coefficients, clamped into [0, 255] on store.
    int red   = (int)(luma + 1.402 * chromaR);
    int green = (int)(luma - 0.344 * chromaB - 0.714 * chromaR);
    int blue  = (int)(luma + 1.772 * chromaB);
    d_r[pix] = clip_rgb_gpu(red);
    d_g[pix] = clip_rgb_gpu(green);
    d_b[pix] = clip_rgb_gpu(blue);
}
21,848 | #include <stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define N 10000
#define M 10000
#define K 10000
// Matrix multiply: each thread computes one output element c[row][col]
// as the dot product of row `row` of a and column `col` of b.
// BUGFIX: the inner loop accumulated a[..] + b[..] (addition) instead of the
// product — the original comment even says "add and multiply".  Also added a
// bounds guard for threads past the matrix edges when the grid overshoots.
// NOTE(review): the host passes a transposed copy of B while this kernel
// still indexes b[k * K + col]; confirm which layout is actually intended.
__global__ void matrix_mul_coal(float *a, float *b, float *c) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N || col >= M)
        return;
    float temp = 0.0; // running dot product
    for (int k = 0; k < K; k++)
    {
        temp += a[row * K + k] * b[k * K + col];
    }
    c[row * K + col] = temp; // final c matrix
}
//Function to initialize matrices with random values
// Fill a size x size matrix (row-major) with pseudo-random values in [0, 1024).
void randomInit (float *data, int size)
{
    int total = size * size;
    for (int k = 0; k < total; k++)
        data[k] = rand() % 1024;
}
// Driver: random NxM matrices, transpose B on the host, multiply on the GPU,
// and report kernel time with and without transfer overhead.
int main(void)
{
    float *a, *b, *c, *bt;  // host matrices (bt = transpose of b)
    float *d_a, *d_b, *d_c; // device matrices
    int matrix_size = N * M * sizeof(float);
    cudaEvent_t start, stop, start1, stop1, start2, stop2;
    float time, time1, time2;
    cudaEventCreate(&start);
    cudaEventCreate(&start1);
    cudaEventCreate(&start2);
    cudaEventCreate(&stop);
    cudaEventCreate(&stop1);
    cudaEventCreate(&stop2);
    // Allocate and initialize host matrices.
    a = (float *) malloc(matrix_size); randomInit(a, N);
    b = (float *) malloc(matrix_size); randomInit(b, M);
    bt = (float *) malloc(matrix_size);
    c = (float *) malloc(matrix_size);
    // Transpose b into bt on the host before uploading.
    for (int i = 0; i < M; i++)
        for (int j = 0; j < M; j++)
            *(bt + i * M + j) = *(b + j * M + i);
    // Allocate GPU memory.
    cudaMalloc((void **) &d_a, matrix_size);
    cudaMalloc((void **) &d_b, matrix_size);
    cudaMalloc((void **) &d_c, matrix_size);
    // Time host -> device transfers.
    cudaEventRecord( start1, 0 );
    cudaMemcpy( d_a, a, matrix_size, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, bt, matrix_size, cudaMemcpyHostToDevice);
    cudaEventRecord( stop1, 0 );
    cudaEventSynchronize(stop1);
    cudaEventElapsedTime( &time1, start1, stop1 );
    cudaEventDestroy( start1);
    cudaEventDestroy( stop1);
    // Thread and grid dimensions (16x16 tiles, rounded up).
    dim3 tBlock(16, 16);
    dim3 Grid((N + 16 - 1)/tBlock.x, (M + 16 - 1)/tBlock.y);
    // Time the kernel itself.
    cudaEventRecord( start, 0 );
    matrix_mul_coal<<< Grid, tBlock >>> (d_a, d_b, d_c);
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize(stop);
    cudaEventElapsedTime( &time, start, stop );
    printf("GPU Execution Time without memory transfer= %f\n",time);
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    // Time device -> host transfer.
    cudaEventRecord( start2, 0 );
    cudaMemcpy( c, d_c, matrix_size, cudaMemcpyDeviceToHost);
    cudaEventRecord( stop2, 0 );
    // BUGFIX: synchronize on stop2 before reading the elapsed time — the
    // original read it without waiting for the event to complete.
    cudaEventSynchronize(stop2);
    cudaEventElapsedTime( &time2, start2, stop2 );
    cudaEventDestroy( start2 );
    cudaEventDestroy( stop2 );
    float tTime = time + time1 + time2;
    printf("GPU Execution Time with memory transfer: %f\n",tTime);
    // BUGFIX: bt was leaked before; free everything.
    free(a); free(b); free(c); free(bt);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
21,849 | #include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
// One thread per input element: histogram each value into out_size bins
// keyed by value % out_size; atomicAdd makes concurrent bin updates safe.
// NOTE(review): the input length is not passed in, so no bounds check is
// possible here — the launcher must not start more threads than elements.
__global__ void histo_kernel(int* d_out, int* d_in, int out_size)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int bin = d_in[tid] % out_size;
    atomicAdd(&d_out[bin], 1);
}
// Driver: histogram 0..ARRAY_SIZE-1 into out_size bins on the GPU and print
// the counts (each bin should receive ARRAY_SIZE / out_size elements).
int main(int argc, char** argv)
{
    int ARRAY_SIZE = 4096; //65536
    int out_size = 16;
    int SIZE = ARRAY_SIZE * sizeof(int);
    int h_in[ARRAY_SIZE];
    int h_out[out_size];
    for (int i = 0; i < ARRAY_SIZE; i++){
        h_in[i] = i;
    }
    for (int i = 0; i < out_size; i++){
        h_out[i] = 0;
    }
    int* d_in;
    int* d_out;
    cudaMalloc((void**) &d_in, SIZE);
    cudaMalloc((void**) &d_out, out_size * sizeof(int));
    cudaMemcpy(d_in, h_in, SIZE, cudaMemcpyHostToDevice);
    // BUGFIX: cudaMalloc does not zero memory — the bins were accumulated on
    // top of garbage.  Clear the device histogram before the kernel runs.
    cudaMemset(d_out, 0, out_size * sizeof(int));
    int threads = 1024;
    int blocks;
    // Ceiling division so every element gets a thread.
    if (ARRAY_SIZE % threads == 0){
        blocks = int(ARRAY_SIZE / threads);
    }else{
        blocks = int(ARRAY_SIZE / threads) + 1;
    }
    printf("threads num: %d; blocks num: %d\n",threads, blocks);
    histo_kernel<<<blocks, threads>>>(d_out, d_in, out_size);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, out_size * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < out_size; i++){
        printf("Count %d: %d\n", i, h_out[i]);
    }
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
21,850 | #include<stdio.h>
#include<math.h>
// #include<omp.h>
#define SIZE 1024
// One pairwise reduction step: C[i] = smaller of A[2i] and A[2i+1].
__global__ void min(int * A, int * C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int left = A[2 * i];
    int right = A[2 * i + 1];
    if (left < right)
        C[i] = left;
    else
        C[i] = right;
}
// Host driver: repeatedly halves the array with the pairwise-min kernel
// until two candidates remain, then compares them on the host.
int main()
{
int A[SIZE];
int *devA,*devC;
//double start,end;
// A = SIZE, SIZE-1, ..., 1 — so the expected minimum is 1.
for(int j=0;j<SIZE;j++)
{
A[j]=SIZE-j;
}
cudaMalloc((void **)&devA,SIZE*sizeof(int));
cudaMalloc((void **)&devC,SIZE*sizeof(int));
//start=omp_get_wtime();
//printf("\nStart time:%f",start);
// Pass j launches SIZE/2^j threads, reducing the first 2*(SIZE/2^j)
// candidates of A into SIZE/2^j survivors; each pass round-trips the whole
// array through the host.  The loop stops when 2 candidates remain.
for(int j=1;j<log2((double)SIZE);j++)
{
cudaMemcpy(devA,A,SIZE*sizeof(int),cudaMemcpyHostToDevice);
min<<<1,SIZE/pow(2,j)>>>(devA,devC);
// NOTE(review): copies all SIZE ints back, though only SIZE/2^j were
// written this pass; the stale tail is never read.  `&A` decays to the
// same address as `A` here.
cudaMemcpy(&A,devC,SIZE*sizeof(int),cudaMemcpyDeviceToHost);
}
//end=omp_get_wtime();
//printf("\nEnd time:%f",end);
//printf("\nTotal time:%f\n",end-start);
// Final host-side comparison of the last two candidates.
A[0]<A[1]?printf("\nMin is:%d\n",A[0]):printf("\nMin is:%d\n",A[1]);
cudaFree(devA);
cudaFree(devC);
return 0;
}
|
21,851 | #include "includes.h"
// Block-level completion marker: waits for every thread in the block, makes
// the block's prior global-memory writes visible device-wide, then has
// thread 0 bump the shared counter exactly once per block.
__device__ void finish(unsigned int* counter) {
__syncthreads();   // all threads in the block reach this point
__threadfence();   // publish this block's writes before signaling
if (threadIdx.x == 0) { atomicAdd(counter, 1); }
}
// Reset the `round` per-launch completion counters to zero.
// NOTE(review): every launched thread writes the whole array; redundant but
// harmless since all writes store the same value.
__global__ void GRUPrepare(unsigned int* finished, const int round) {
    for (int r = 0; r < round; r++) {
        finished[r] = 0;
    }
}
21,852 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add, one thread per element: c[i] = a[i] + b[i].
// Launched elsewhere in this file as <<<1, size>>>, so threadIdx.x alone
// indexes the whole array.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
cudaError_t searchPattern(char *string,char*pat,int* res);
// Naive substring search split across exactly 4 threads: each thread scans
// one quarter of `string` for `pat` and sets res[i] = 1 at every match start.
// Threads 0-2 scan segstringlen + patlen characters so matches straddling a
// segment boundary are not missed; thread 3 scans lastsegstringlen (the
// remainder).  The printf calls are debug output and serialize execution.
__global__ void searchPatternKernel(const char *string,const char*pat,int patlen,int segstringlen,int lastsegstringlen,int *res)
{
int startIndex =threadIdx.x;   // segment number, 0..3
printf("%d\n",startIndex);
int pos;//the pos of this segment
//divide into 4 segment 0123
pos = startIndex*segstringlen;
printf("pos:%d\n",pos);
/*if (string[i]==pat[0])
{
int j;
for(j=1;j<4;j++)
{
if (string[i+j]!=pat[j])break;
else res[i]=1;
}
}*/
int strl;//the length of the segment
if (startIndex<3)
{
strl=segstringlen+patlen;   // overlap covers boundary-straddling matches
}
else strl=lastsegstringlen;
printf("%d\n",strl);
printf("pati :%s %d\n",pat,startIndex);
printf("string i:%s %d\n",string,startIndex);
int i;
// Try every candidate start position in this segment.
for (i=pos;i<pos+strl-patlen+1;i++)
{
int flag=1;   // assume match until a character differs
int j;
for (j=0;j<patlen;j++)
{
if (pat[j]!=string[j+i]){flag=0;break;}
}
if(flag)res[i]=1;
}
}
// Driver: run the 4-thread GPU substring search over a fixed haystack and
// print every match position plus the total count.
int main()
{
    // Haystack and needle; res[i] == 1 marks a match starting at position i.
    char string[]="wefdfewfjwfbhwyfghwqfbhweyhwefhwefewbfwfhbwuw";
    char pat[]="wef";
    int const datasize= strlen(string);
    int* res;
    res=(int*)malloc((datasize)* sizeof(int));
    // BUGFIX: the original memset cleared only `datasize` BYTES of an int
    // array that is datasize * sizeof(int) bytes long.
    memset(res, 0, datasize * sizeof(int));
    cudaError_t cudaStatus = searchPattern(string, pat,res);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "searchPattern failed!");
        free(res);  // BUGFIX: res leaked on every exit path before
        return 1;
    }
    // Print out the string match result position
    int total_matches = 0;
    for (int i=0; i<datasize; i++) {
        if (res[i] == 1) {
            printf("Character found at position % i\n", i);
            total_matches++;
        }
    }
    printf("Total matches = %d\n", total_matches);
    free(res);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    system("pause");
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b, c, copies the inputs up, launches
// addKernel with one thread per element, synchronizes, and copies the sum
// back into c.  Returns the first CUDA error encountered (cudaSuccess on
// success); device buffers are always freed via the Error label.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output)    .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
// Shared cleanup path: cudaFree(0) is a no-op, so partially-initialized
// states are safe to free here.
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
// GPU substring search wrapper: uploads the haystack and pattern, zeroes the
// result array on the device, launches searchPatternKernel with 4 threads
// (one per quarter of the string), and copies the per-position match flags
// back into res.  Returns the first CUDA error encountered; device buffers
// are always freed via the Error label.
cudaError_t searchPattern(char* string, char*pat,int *res)
{
    char *dev_string = 0;
    char *dev_pat = 0;
    int *dev_res = 0;
    int stringlen=strlen(string);
    int patlen=strlen(pat);
    printf("%d\n",patlen);
    // Split the haystack into 4 segments; the last one absorbs the remainder.
    int segstrlen=stringlen/4;
    printf("%d\n",segstrlen);
    int lastsegstrlen= segstrlen+stringlen%4;
    printf("%d\n",lastsegstrlen);
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for the haystack, pattern, and result flags.
    cudaStatus = cudaMalloc((void**)&dev_string,(stringlen)* sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_pat, (patlen) * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_res, (stringlen) * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_string, string, (stringlen) * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy1 failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_pat, pat, (patlen)* sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy2 failed!");
        goto Error;
    }
    cudaStatus = cudaMemset(dev_res, 0, (stringlen)* sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemset failed!");
        goto Error;
    }
    // Launch with a single block of 4 threads, one per segment.
    searchPatternKernel<<<1,4>>>(dev_string,dev_pat,patlen,segstrlen,lastsegstrlen,dev_res);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "PatternKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // BUGFIX: cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize,
    // which waits for the kernel to finish and reports execution errors.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching searchpatternKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(res, dev_res, (stringlen) * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_res);
    cudaFree(dev_pat);
    cudaFree(dev_string);
    return cudaStatus;
}
|
21,853 | #include "includes.h"
// Regularized softmax weight gradient, element-wise over `len` entries:
// wgrad[id] = lambda * weight[id] + wgrad[id] / batch.
// A single block strides over the array in steps of blockDim.x.
__global__ void g_getSmrWgrad(float* wgrad, float* weight, float lambda, int len, int batch)
{
    for (int id = threadIdx.x; id < len; id += blockDim.x)
    {
        wgrad[id] = lambda * weight[id] + wgrad[id] / batch;
    }
}
21,854 | // TODO: Implement FriedelMixed, other 2 are done (Friedel and noFriedel)
#include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#define RealType double
// conversions constants
#define deg2rad 0.0174532925199433
#define rad2deg 57.2957795130823
#define EPS 1E-12
#define CalcNorm3(x,y,z) sqrt((x)*(x) + (y)*(y) + (z)*(z))
#define CalcNorm2(x,y) sqrt((x)*(x) + (y)*(y))
// max array sizes
#define MAX_N_SPOTS 6000000 // max nr of observed spots that can be stored
#define MAX_N_STEPS 1000 // Max nr of pos steps, when stepping along the diffracted ray
#define MAX_N_OR 36000 // max nr of trial orientations that can be stored (360/0.01);
#define MAX_N_MATCHES 1 // max nr of grain matches for 1 spot
#define MAX_N_RINGS 500 // max nr of rings that can be stored (applies to the arrays ringttheta, ringhkl, etc)
#define MAX_N_HKLS 500 // max nr of hkls that can be stored
#define MAX_N_OMEGARANGES 72 // max nr of omegaranges in input file (also max no of box sizes)
#define N_COL_THEORSPOTS 8 // number of items that is stored for each calculated spot (omega, eta, etc)
#define N_COL_OBSSPOTS 9 // number of items stored for each obs spots
#define N_COL_GRAINSPOTS 17 // nr of columns for output: y, z, omega, differences for spots of grain matches
#define N_COL_GRAINMATCHES 16 // nr of columns for output: the Matches (summary)
#define MAX_LINE_LENGTH 4096
#define MAX_N_FRIEDEL_PAIRS 1000
#define MAX_N_EVALS 1000
#define N_COLS_FRIEDEL_RESULTS 16
#define N_COLS_ORIENTATION_NUMBERS 3
#define MaxNSpotsBest 10
__device__ typedef RealType (*nlopt_func)(int n, RealType *x, void *func_data);
typedef enum {
NLOPT_FAILURE = -1, /* generic failure code */
NLOPT_INVALID_ARGS = -2,
NLOPT_OUT_OF_MEMORY = -3,
NLOPT_ROUNDOFF_LIMITED = -4,
NLOPT_FORCED_STOP = -5,
NLOPT_SUCCESS = 1, /* generic success code */
NLOPT_STOPVAL_REACHED = 2,
NLOPT_FTOL_REACHED = 3,
NLOPT_XTOL_REACHED = 4,
NLOPT_MAXEVAL_REACHED = 5,
NLOPT_MAXTIME_REACHED = 6
} nlopt_result;
typedef struct {
unsigned n;
RealType minf_max;
RealType ftol_rel;
RealType xtol_rel;
int nevals, maxeval;
} nlopt_stopping;
// Relative-tolerance stopping test: true when vnew is within reltol of vold
// (scaled by their mean magnitude), or exactly equal with a positive
// tolerance.  A NaN vold never triggers a stop.
__device__ int relstop(RealType vold, RealType vnew, RealType reltol)
{
    if (vold != vold)  // NaN compares unequal to itself
        return 0;
    RealType scale = (fabs(vnew) + fabs(vold)) * 0.5;
    if (fabs(vnew - vold) < reltol * scale)
        return 1;
    return (reltol > 0 && vnew == vold);
}
// Function-value convergence test against the configured relative f-tolerance.
__device__ int nlopt_stop_ftol(const nlopt_stopping *s, RealType f, RealType oldf)
{
    RealType tol = s->ftol_rel;
    return relstop(oldf, f, tol);
}
// Stop when f has reached the target value minf_max, or when it has
// converged relative to the previous value oldf.
__device__ int nlopt_stop_f(const nlopt_stopping *s, RealType f, RealType oldf)
{
    if (f <= s->minf_max)
        return 1;
    return nlopt_stop_ftol(s, f, oldf);
}
// x-convergence test: true only when EVERY coordinate satisfies the
// relative x-tolerance against its previous value.
__device__ int nlopt_stop_x(const nlopt_stopping *s, const RealType *x, const RealType *oldx)
{
    for (unsigned i = 0; i < s->n; ++i) {
        if (!relstop(oldx[i], x[i], s->xtol_rel))
            return 0;
    }
    return 1;
}
// Step-size convergence test: true only when every coordinate's step dx[i]
// is small relative to x[i] under the configured x-tolerance.
__device__ int nlopt_stop_dx(const nlopt_stopping *s, const RealType *x, const RealType *dx)
{
    for (unsigned i = 0; i < s->n; ++i) {
        if (!relstop(x[i] - dx[i], x[i], s->xtol_rel))
            return 0;
    }
    return 1;
}
// Evaluation-budget test: true once nevals has reached a positive maxeval
// (a non-positive maxeval means no limit).
__device__ int nlopt_stop_evals(const nlopt_stopping *s)
{
    if (s->maxeval <= 0)
        return 0;
    return s->nevals >= s->maxeval;
}
#define NLOPT_MINF_MAX_REACHED NLOPT_STOPVAL_REACHED
/* return 1 if a and b are approximately equal relative to floating-point
precision, 0 otherwise */
/* return 1 if a and b are approximately equal relative to floating-point
   precision (~1e-13 of their combined magnitude), 0 otherwise */
__device__ int close(RealType a, RealType b)
{
    RealType diff = fabs(a - b);
    return diff <= 1e-13 * (fabs(a) + fabs(b));
}
// Reflect xold through the centroid c by factor `scale`, clipping each
// coordinate into [lb, ub], and store the result in xnew.  Returns 0 when
// the clipped point coincides with c or with xold (i.e. no progress).
__device__ int reflectpt(int n, RealType *xnew,
const RealType *c, RealType scale, const RealType *xold,
const RealType *lb, const RealType *ub)
{
    int sameAsC = 1;
    int sameAsOld = 1;
    for (int i = 0; i < n; ++i) {
        RealType xi = c[i] + scale * (c[i] - xold[i]);
        // Two independent clamps (not else-if) to match the original
        // behavior even for degenerate bounds.
        if (xi < lb[i]) xi = lb[i];
        if (xi > ub[i]) xi = ub[i];
        if (!close(xi, c[i])) sameAsC = 0;
        if (!close(xi, xold[i])) sameAsOld = 0;
        xnew[i] = xi;
    }
    return !(sameAsC || sameAsOld);
}
#define CHECK_EVAL(xc,fc) \
stop->nevals++; \
if ((fc) <= *minf) { \
*minf = (fc); memcpy(x, (xc), n * sizeof(RealType)); \
if (*minf < stop->minf_max) { ret=NLOPT_MINF_MAX_REACHED; goto done; } \
} \
if (nlopt_stop_evals(stop)) { ret=NLOPT_MAXEVAL_REACHED; goto done; } \
// Core Nelder-Mead simplex iteration (device-side port of NLopt's neldermead,
// with the red-black tree replaced by linear scans over `pts`).
// scratch layout: pts = (n+1) rows of (n+1) entries [f-value, x...],
// followed by the centroid c (n) and the working point xcur (n).
// On entry x/minf hold the starting point and its value; on success they hold
// the minimizer found.  *fdiff reports the final simplex f-spread.
// NOTE(review): the fl/xl ("low") variables below actually track the LARGEST
// f value and fh/xh the SMALLEST — names appear swapped relative to standard
// Nelder-Mead; behavior preserved verbatim, confirm against the original.
// NOTE(review): the `for (i = 0; i < n + 1; ++i)` after `restart:` wraps the
// `while (1)` as its body, so it effectively executes the loop once — likely
// a leftover from the list-based rewrite mentioned in the comment.
__device__ nlopt_result nldrmd_minimize_(int n, nlopt_func f, void *f_data,
const RealType *lb, const RealType *ub, /* bounds */
RealType *x, /* in: initial guess, out: minimizer */
RealType *minf,
const RealType *xstep, /* initial step sizes */
nlopt_stopping *stop,
RealType psi, RealType *scratch,
RealType *fdiff)
{
RealType *pts; /* (n+1) x (n+1) array of n+1 points plus function val [0] */
RealType *c; /* centroid * n */
RealType *xcur; /* current point */
int i, j;
RealType ninv = 1.0 / n;
nlopt_result ret = NLOPT_SUCCESS;
RealType init_diam = 0;
RealType *highi;
pts = scratch;
c = scratch + (n+1)*(n+1);
xcur = c + n;
*fdiff = HUGE_VAL;
/* initialize the simplex based on the starting xstep */
for (i=0;i<n;i++) pts[1+i] = x[i];
//memcpy(pts+1, x, sizeof(RealType)*n);
pts[0] = *minf;
if (*minf < stop->minf_max) { ret=NLOPT_MINF_MAX_REACHED; goto done; }
for (i = 0; i < n; ++i) {
RealType *pt = pts + (i+1)*(n+1);
for (j=0;j<n;j++) pt[1+j] = x[j];
//memcpy(pt+1, x, sizeof(RealType)*n);
pt[1+i] += xstep[i];
if (pt[1+i] > ub[i]) {
if (ub[i] - x[i] > fabs(xstep[i]) * 0.1)
pt[1+i] = ub[i];
else /* ub is too close to pt, go in other direction */
pt[1+i] = x[i] - fabs(xstep[i]);
}
if (pt[1+i] < lb[i]) {
if (x[i] - lb[i] > fabs(xstep[i]) * 0.1)
pt[1+i] = lb[i];
else {/* lb is too close to pt, go in other direction */
pt[1+i] = x[i] + fabs(xstep[i]);
if (pt[1+i] > ub[i]) /* go towards further of lb, ub */
pt[1+i] = 0.5 * ((ub[i] - x[i] > x[i] - lb[i] ?
ub[i] : lb[i]) + x[i]);
}
}
if (close(pt[1+i], x[i])) { ret=NLOPT_FAILURE; goto done; }
pt[0] = f(n, pt+1, f_data);
CHECK_EVAL(pt+1, pt[0]);
}
restart:
for (i = 0; i < n + 1; ++i)
// Create list to have f(x) and x values, it doesn't need to be a sorted list.
// This could be avoided by using pts to calculate high and low.
while (1) {
// Linear scan replacing the original red-black tree lookup of the
// best/worst vertices (see naming caveat in the header note).
RealType fl = pts[0], *xl = pts + 1;
RealType fh = pts[0], *xh = pts + 1;
highi = pts;
for (i = 1; i < n+1; ++i){
if (fl < pts[i*(n+1)]){
fl = pts[i*(n+1)];
xl = pts + i*(n+1) + 1;
}
if (fh > pts[i*(n+1)]){
fh = pts[i*(n+1)];
xh = pts + i*(n+1) + 1;
highi = pts + i*(n+1);
}
}
RealType fr;
*fdiff = fh - fl;
if (init_diam == 0) /* initialize diam. for psi convergence test */
for (i = 0; i < n; ++i) init_diam += fabs(xl[i] - xh[i]);
if (psi <= 0 && nlopt_stop_ftol(stop, fl, fh)) {
ret = NLOPT_FTOL_REACHED;
goto done;
}
/* compute centroid */
memset(c, 0, sizeof(RealType)*n);
for (i = 0; i < n + 1; ++i) {
RealType *xi = pts + i*(n+1) + 1;
if (xi != xh)
for (j = 0; j < n; ++j)
c[j] += xi[j];
}
for (i = 0; i < n; ++i) c[i] *= ninv;
/* x convergence check: find xcur = max radius from centroid */
memset(xcur, 0, sizeof(RealType)*n);
for (i = 0; i < n + 1; ++i) {
RealType *xi = pts + i*(n+1) + 1;
for (j = 0; j < n; ++j) {
RealType dx = fabs(xi[j] - c[j]);
if (dx > xcur[j]) xcur[j] = dx;
}
}
for (i = 0; i < n; ++i) xcur[i] += c[i];
if (psi > 0) {
RealType diam = 0;
for (i = 0; i < n; ++i) diam += fabs(xl[i] - xh[i]);
if (diam < psi * init_diam) {
ret = NLOPT_XTOL_REACHED;
goto done;
}
}
else if (nlopt_stop_x(stop, c, xcur)) {
ret = NLOPT_XTOL_REACHED;
goto done;
}
/* reflection */
if (!reflectpt(n, xcur, c, 1.0, xh, lb, ub)) {
ret=NLOPT_XTOL_REACHED; goto done;
}
fr = f(n, xcur, f_data);
CHECK_EVAL(xcur, fr);
if (fr < fl) { /* new best point, expand simplex */
if (!reflectpt(n, xh, c, 2.0, xh, lb, ub)) {
ret=NLOPT_XTOL_REACHED; goto done;
}
fh = f(n, xh, f_data);
CHECK_EVAL(xh, fh);
if (fh >= fr) { /* expanding didn't improve */
fh = fr;
memcpy(xh, xcur, sizeof(RealType)*n);
}
}
else if (fr < fh){//rb_tree_pred(high)->k[0]) { /* accept new point */ // how is this done is unclear
memcpy(xh, xcur, sizeof(RealType)*n);
fh = fr;
}
else { /* new worst point, contract */
RealType fc;
if (!reflectpt(n,xcur,c, fh <= fr ? -0.5 : 0.5, xh, lb,ub)) {
ret=NLOPT_XTOL_REACHED; goto done;
}
fc = f(n, xcur, f_data);
CHECK_EVAL(xcur, fc);
if (fc < fr && fc < fh) { /* successful contraction */
memcpy(xh, xcur, sizeof(RealType)*n);
fh = fc;
}
else { /* failed contraction, shrink simplex */
for (i = 0; i < n+1; ++i) {
RealType *pt = pts + i * (n+1);
if (pt+1 != xl) {
if (!reflectpt(n,pt+1, xl,-0.5,pt+1, lb,ub)) {
ret = NLOPT_XTOL_REACHED;
goto done;
}
pt[0] = f(n, pt+1, f_data);
CHECK_EVAL(pt+1, pt[0]);
}
}
goto restart;
}
}
*highi = fh;   // write the replaced vertex's f value back into pts
}
done:
return ret;
}
// Entry point for the Nelder-Mead minimizer: evaluates f at the initial
// guess, applies the stopval / maxeval early-outs, then delegates to
// nldrmd_minimize_ with psi = 0 (pure ftol/xtol stopping).
// scratch must hold at least 3n + (n+1)*(n+1) RealType values.
__device__ nlopt_result nldrmd_minimize(int n, nlopt_func f, void *f_data,
const RealType *lb, const RealType *ub, /* bounds */
RealType *x, /* in: initial guess, out: minimizer */
RealType *minf,
const RealType *xstep, /* initial step sizes */
nlopt_stopping *stop, RealType *scratch)
{
nlopt_result ret;
RealType fdiff;
*minf = f(n, x, f_data);
stop->nevals++;
// Early exits before building the simplex.
if (*minf < stop->minf_max) return NLOPT_MINF_MAX_REACHED;
if (nlopt_stop_evals(stop)) return NLOPT_MAXEVAL_REACHED;
ret = nldrmd_minimize_(n, f, f_data, lb, ub, x, minf, xstep, stop,
0.0, scratch, &fdiff);
return ret;
}
//END NLOPT NELDERMEAD
//BEGIN NLDRMD FUNCTION scratch space: 3n+(n+1)*(n+1)
// Nelder-Mead simplex minimization (O'Neill's NELMIN variant) with box
// constraints [lb, ub] clamped at every candidate point.
//   fn      : objective, called as fn(n, x, data_t)
//   n       : number of variables; start : initial guess (modified on restart)
//   xmin    : out, best point found; ynewlo : out, fn(xmin)
//   scratch : workspace of at least n*(n+1) + 3n + (n+1) RealType values
//             (simplex p, pstar, p2star, pbar, y)
//   reqmin  : variance-of-f termination threshold; step : initial step sizes
//   konvge  : iterations between convergence checks; kcount : max fn evals
//   icount/numres : out, evaluation and restart counters
//   ifault  : out, 0 = converged, 1 = bad input, 2 = eval budget exhausted
__device__ void nelmin ( RealType fn ( int n_fun, RealType *x, void *data ),
int n, RealType *start, RealType *xmin,
RealType *lb, RealType *ub, RealType *scratch, RealType *ynewlo,
RealType reqmin, RealType *step, int konvge, int kcount,
int *icount, int *numres, int *ifault, void *data_t){
RealType ccoeff = 0.5;
RealType del;
RealType dn;
RealType dnn;
RealType ecoeff = 2.0;
RealType eps = 0.001;
int i;
int ihi;
int ilo;
int j;
int jcount;
int l;
int nn;
RealType *p;
RealType *p2star;
RealType *pbar;
RealType *pstar;
RealType rcoeff = 1.0;
RealType rq;
RealType x;
RealType *y;
RealType y2star;
RealType ylo;
RealType ystar;
RealType z;
/*
Check the input parameters.
*/
if ( reqmin <= 0.0 )
{
*ifault = 1;
return;
}
if ( n < 1 )
{
*ifault = 1;
return;
}
if ( konvge < 1 )
{
*ifault = 1;
return;
}
// Carve the caller-provided scratch buffer into the working arrays.
p = scratch;
pstar = p + n*(n+1);
p2star = pstar + n;
pbar = p2star + n;
y = pbar + n;
*icount = 0;
*numres = 0;
jcount = konvge;
dn = ( RealType ) ( n );
nn = n + 1;
dnn = ( RealType ) ( nn );
del = 1.0;
rq = reqmin * dn;
/*
Initial or restarted loop.
*/
for ( ; ; )
{
for ( i = 0; i < n; i++ )
{
p[i+n*n] = start[i];
}
y[n] = fn ( n, start, data_t );
*icount = *icount + 1;
for ( j = 0; j < n; j++ )
{
x = start[j];
start[j] = start[j] + step[j] * del;
if (start[j] < lb[j]) start[j] = lb[j]; // Constraints
if (start[j] > ub[j]) start[j] = ub[j]; // Constraints
for ( i = 0; i < n; i++ )
{
p[i+j*n] = start[i];
}
y[j] = fn ( n, start, data_t );
*icount = *icount + 1;
start[j] = x;
}
/*
The simplex construction is complete.
Find highest and lowest Y values.  YNEWLO = Y(IHI) indicates
the vertex of the simplex to be replaced.
*/
ylo = y[0];
ilo = 0;
for ( i = 1; i < nn; i++ )
{
if ( y[i] < ylo )
{
ylo = y[i];
ilo = i;
}
}
/*
Inner loop.
*/
for ( ; ; )
{
if ( kcount <= *icount )
{
break;
}
*ynewlo = y[0];
ihi = 0;
for ( i = 1; i < nn; i++ )
{
if ( *ynewlo < y[i] )
{
*ynewlo = y[i];
ihi = i;
}
}
/*
Calculate PBAR, the centroid of the simplex vertices
excepting the vertex with Y value YNEWLO.
*/
for ( i = 0; i < n; i++ )
{
z = 0.0;
for ( j = 0; j < nn; j++ )
{
z = z + p[i+j*n];
}
z = z - p[i+ihi*n];
pbar[i] = z / dn;
}
/*
Reflection through the centroid.
*/
for ( i = 0; i < n; i++ )
{
pstar[i] = pbar[i] + rcoeff * ( pbar[i] - p[i+ihi*n] );
if (pstar[i] < lb[i]) pstar[i] = lb[i]; // Constraints
if (pstar[i] > ub[i]) pstar[i] = ub[i]; // Constraints
}
ystar = fn ( n, pstar, data_t );
*icount = *icount + 1;
/*
Successful reflection, so extension.
*/
if ( ystar < ylo )
{
for ( i = 0; i < n; i++ )
{
p2star[i] = pbar[i] + ecoeff * ( pstar[i] - pbar[i] );
if (p2star[i] < lb[i]) p2star[i] = lb[i]; // Constraints
if (p2star[i] > ub[i]) p2star[i] = ub[i]; // Constraints
}
y2star = fn ( n, p2star, data_t );
*icount = *icount + 1;
/*
Check extension.
*/
if ( ystar < y2star )
{
for ( i = 0; i < n; i++ )
{
p[i+ihi*n] = pstar[i];
}
y[ihi] = ystar;
}
/*
Retain extension or contraction.
*/
else
{
for ( i = 0; i < n; i++ )
{
p[i+ihi*n] = p2star[i];
}
y[ihi] = y2star;
}
}
/*
No extension.
*/
else
{
l = 0;
for ( i = 0; i < nn; i++ )
{
if ( ystar < y[i] )
{
l = l + 1;
}
}
if ( 1 < l )
{
for ( i = 0; i < n; i++ )
{
p[i+ihi*n] = pstar[i];
}
y[ihi] = ystar;
}
/*
Contraction on the Y(IHI) side of the centroid.
*/
else if ( l == 0 )
{
for ( i = 0; i < n; i++ )
{
p2star[i] = pbar[i] + ccoeff * ( p[i+ihi*n] - pbar[i] );
if (p2star[i] < lb[i]) p2star[i] = lb[i]; // Constraints
if (p2star[i] > ub[i]) p2star[i] = ub[i]; // Constraints
}
y2star = fn ( n, p2star, data_t );
*icount = *icount + 1;
/*
Contract the whole simplex.
*/
if ( y[ihi] < y2star )
{
for ( j = 0; j < nn; j++ )
{
for ( i = 0; i < n; i++ )
{
p[i+j*n] = ( p[i+j*n] + p[i+ilo*n] ) * 0.5;
xmin[i] = p[i+j*n];
if (xmin[i] < lb[i]) xmin[i] = lb[i]; // Constraints
if (xmin[i] > ub[i]) xmin[i] = ub[i]; // Constraints
}
y[j] = fn ( n, xmin, data_t );
*icount = *icount + 1;
}
ylo = y[0];
ilo = 0;
for ( i = 1; i < nn; i++ )
{
if ( y[i] < ylo )
{
ylo = y[i];
ilo = i;
}
}
continue;
}
/*
Retain contraction.
*/
else
{
for ( i = 0; i < n; i++ )
{
p[i+ihi*n] = p2star[i];
}
y[ihi] = y2star;
}
}
/*
Contraction on the reflection side of the centroid.
*/
else if ( l == 1 )
{
for ( i = 0; i < n; i++ )
{
p2star[i] = pbar[i] + ccoeff * ( pstar[i] - pbar[i] );
if (p2star[i] < lb[i]) p2star[i] = lb[i]; // Constraints
if (p2star[i] > ub[i]) p2star[i] = ub[i]; // Constraints
}
y2star = fn ( n, p2star, data_t );
*icount = *icount + 1;
/*
Retain reflection?
*/
if ( y2star <= ystar )
{
for ( i = 0; i < n; i++ )
{
p[i+ihi*n] = p2star[i];
}
y[ihi] = y2star;
}
else
{
for ( i = 0; i < n; i++ )
{
p[i+ihi*n] = pstar[i];
}
y[ihi] = ystar;
}
}
}
/*
Check if YLO improved.
*/
if ( y[ihi] < ylo )
{
ylo = y[ihi];
ilo = ihi;
}
jcount = jcount - 1;
if ( 0 < jcount )
{
continue;
}
/*
Check to see if minimum reached.
*/
if ( *icount <= kcount )
{
jcount = konvge;
// Terminate when the variance of the vertex f-values falls below rq.
z = 0.0;
for ( i = 0; i < nn; i++ )
{
z = z + y[i];
}
x = z / dnn;
z = 0.0;
for ( i = 0; i < nn; i++ )
{
z = z + pow ( y[i] - x, 2 );
}
if ( z <= rq )
{
break;
}
}
}
/*
Factorial tests to check that YNEWLO is a local minimum.
*/
for ( i = 0; i < n; i++ )
{
xmin[i] = p[i+ilo*n];
}
*ynewlo = y[ilo];
if ( kcount < *icount )
{
*ifault = 2;
break;
}
*ifault = 0;
for ( i = 0; i < n; i++ )
{
del = step[i] * eps;
xmin[i] = xmin[i] + del;
if (xmin[i] < lb[i]) xmin[i] = lb[i]; // Constraints
if (xmin[i] > ub[i]) xmin[i] = ub[i]; // Constraints
z = fn ( n, xmin, data_t );
*icount = *icount + 1;
if ( z < *ynewlo )
{
*ifault = 2;
break;
}
xmin[i] = xmin[i] - del - del;
if (xmin[i] < lb[i]) xmin[i] = lb[i]; // Constraints
if (xmin[i] > ub[i]) xmin[i] = ub[i]; // Constraints
z = fn ( n, xmin, data_t );
*icount = *icount + 1;
if ( z < *ynewlo )
{
*ifault = 2;
break;
}
xmin[i] = xmin[i] + del;
}
if ( *ifault == 0 )
{
break;
}
/*
Restart the procedure.
*/
for ( i = 0; i < n; i++ )
{
start[i] = xmin[i];
}
del = eps;
*numres = *numres + 1;
}
return;
}
//END NLDRMD FUNCTION
// File-scope bin sizes for the eta/omega binning.
// NOTE(review): presumably copied from ParametersStruct after ReadParams — confirm in main.
RealType EtaBinSize = 0;
RealType OmeBinSize = 0;
// Math helper macros.
// NOTE(review): crossProduct expands to three separate statements without a
// do { ... } while(0) wrapper, so it is unsafe directly under an unbraced
// `if`/`else` — confirm call sites or wrap it.
#define crossProduct(a,b,c) \
	(a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \
	(a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \
	(a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1];
// Dot product of two 3-vectors.
#define dot(v,q) \
	((v)[0] * (q)[0] + \
	(v)[1] * (q)[1] + \
	(v)[2] * (q)[2])
// Euclidean length of the vector (x,y,z).
#define CalcLength(x,y,z) sqrt((x)*(x) + (y)*(y) + (z)*(z))
// CUDA API error check: prints file/line and the CUDA reason, then exits.
#define CHECK(call){ \
	const cudaError_t error = call; \
	if (error != cudaSuccess){ \
		printf("Error: %s:%d, ", __FILE__, __LINE__); \
		printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
		exit(-10*error); \
	} \
}
RealType cpuSecond(){
	// Host-side wall-clock timestamp in seconds (microsecond resolution),
	// used for coarse timing of GPU/CPU phases.
	struct timeval now;
	gettimeofday(&now, NULL);
	return (RealType)now.tv_sec + 1.e-6*(RealType)now.tv_usec;
}
// All user-supplied configuration for the indexer, populated by ReadParams().
struct ParametersStruct {
	int RingNumbers[MAX_N_RINGS]; // the ring numbers to use for indexing (1, 2, 4, etc)
	int SpaceGroupNum; // crystallographic space-group number (1..230), drives symmetry handling
	RealType LatticeConstant; // [Angstrom] (first value of the LatticeParameter line)
	RealType Wavelength; // Wavelength of incoming beam [Angstrom]
	RealType Distance; // Distance between sample and detector [micron]
	RealType Rsample; // Radius of the sample [micron]
	RealType Hbeam; // Height of the beam [micron]
	RealType StepsizePos; // step size in position [micron]
	RealType StepsizeOrient; // step size in orientation (rotation around the plane normal) [degrees]
	int NrOfRings; // No of rings to use (not explicit input by user, but set via RingNumbers[])
	RealType RingRadii[MAX_N_RINGS]; // Radii of the rings [micron]. this is a used internally: ringrad of ring 1 is at index 1 etc.
	RealType RingRadiiUser[MAX_N_RINGS]; // Radii of the rings [micron]. stores only radii of the used rings!! Used for user input.
	RealType MarginOme; // Margin in Omega [degrees], when assigning theoretical spots to experimental spots. (|omeT-omeO| < MarginOme)
	RealType MarginEta; // Margin in eta [degrees], ,,
	RealType MarginRad; // Margin in radius [micron], ,,
	RealType MarginRadial; // Margin in radial direction (ortogonal to the ring) [micron], ,,
	RealType EtaBinSize; // Size of bin for eta [degrees]
	RealType OmeBinSize; // Size of bin for omega [degrees]
	RealType ExcludePoleAngle; // Spots can be excluded at the poles: the range is |Eta| < ExcludePoleAngle and 180-|Eta| < ExcludePoleAngle [degrees]
	RealType MinMatchesToAcceptFrac; // Minimum fraction (matched_spots/exp_spots) to accept an orientation+position.
	RealType BoxSizes[MAX_N_OMEGARANGES][4]; // for each omegarange a box (window: left right bottom top) that defines the spots to include during indexing [micron]
	RealType OmegaRanges[MAX_N_OMEGARANGES][2]; // Omegaranges: min, max [degrees], multiple possible.
	char OutputFolder[MAX_LINE_LENGTH]; // output folder
	char ResultFolder[MAX_LINE_LENGTH]; // Results folder
	int NoOfOmegaRanges; // Automaticly set from Omegaranges (not explicit input by user)
	char SpotsFileName[MAX_LINE_LENGTH]; // filename containing observed spots (see top for definition of columns)
	char IDsFileName [MAX_LINE_LENGTH]; // filename containing the spot-ids that will be used for indexing
	int UseFriedelPairs; // 0=do not use friedelpairs 1=try to use friedelpairs
	RealType ABCABG[6]; // ABC, Alpha, Beta, Gamma for the structure
	RealType MargABC; // refinement margin on the a,b,c lattice constants
	RealType MargABG; // refinement margin on the alpha,beta,gamma lattice angles
	int TopLayer; // "TopLayer" key from the parameter file (flag)
	RealType wedge; // wedge (tilt) angle of the rotation axis [degrees]
};
// True iff `line` starts with the parameter keyword `key`. Keys carry a
// trailing space so e.g. "Wavelength " cannot match "WavelengthXY".
static int KeyMatches(const char *line, const char *key){
	return strncmp(line, key, strlen(key)) == 0;
}
// Parse the parameter file (lines of the form "Keyword value(s)") into *Params.
// Returns 0 on success, 1 if the file cannot be opened.
// Fixes vs. original: the FILE handle is now closed on the success path, and
// blank lines are skipped explicitly — the original compared unmatched lines
// against "" with strncmp(line, "", 0), which always returns 0, making the
// unknown-keyword warning unreachable.
int ReadParams(char FileName[], struct ParametersStruct * Params){
	FILE *fp;
	char line[MAX_LINE_LENGTH];
	char dummy[MAX_LINE_LENGTH];
	int NrOfBoxSizes = 0;
	int NoRingNumbers = 0; // should end up equal to Params->NrOfRings
	Params->NrOfRings = 0;
	Params->NoOfOmegaRanges = 0;
	fp = fopen(FileName, "r");
	if (fp == NULL) {
		printf("Cannot open file: %s.\n", FileName);
		return(1);
	}
	fflush(stdout);
	while (fgets(line, MAX_LINE_LENGTH, fp) != NULL) {
		if (KeyMatches(line, "RingNumbers ")) {
			sscanf(line, "%s %d", dummy, &(Params->RingNumbers[NoRingNumbers]));
			NoRingNumbers++;
			continue;
		}
		if (KeyMatches(line, "TopLayer ")) {
			sscanf(line, "%s %d", dummy, &(Params->TopLayer));
			continue;
		}
		if (KeyMatches(line, "SpaceGroup ")) {
			sscanf(line, "%s %d", dummy, &(Params->SpaceGroupNum));
			continue;
		}
		if (KeyMatches(line, "LatticeParameter ")) {
			// The first value doubles as the scalar lattice constant;
			// all six values (a,b,c,alpha,beta,gamma) go to ABCABG.
			sscanf(line, "%s %lf", dummy, &(Params->LatticeConstant));
			sscanf(line, "%s %lf %lf %lf %lf %lf %lf", dummy, &(Params->ABCABG[0]), &(Params->ABCABG[1]),
				&(Params->ABCABG[2]), &(Params->ABCABG[3]), &(Params->ABCABG[4]), &(Params->ABCABG[5]));
			continue;
		}
		if (KeyMatches(line, "Wavelength ")) {
			sscanf(line, "%s %lf", dummy, &(Params->Wavelength));
			continue;
		}
		if (KeyMatches(line, "Distance ")) {
			sscanf(line, "%s %lf", dummy, &(Params->Distance));
			continue;
		}
		if (KeyMatches(line, "Rsample ")) {
			sscanf(line, "%s %lf", dummy, &(Params->Rsample));
			continue;
		}
		if (KeyMatches(line, "Hbeam ")) {
			sscanf(line, "%s %lf", dummy, &(Params->Hbeam));
			continue;
		}
		if (KeyMatches(line, "Wedge ")) {
			sscanf(line, "%s %lf", dummy, &(Params->wedge));
			continue;
		}
		if (KeyMatches(line, "StepsizePos ")) {
			sscanf(line, "%s %lf", dummy, &(Params->StepsizePos));
			continue;
		}
		if (KeyMatches(line, "StepsizeOrient ")) {
			sscanf(line, "%s %lf", dummy, &(Params->StepsizeOrient));
			continue;
		}
		if (KeyMatches(line, "MarginOme ")) {
			sscanf(line, "%s %lf", dummy, &(Params->MarginOme));
			continue;
		}
		if (KeyMatches(line, "MarginRadius ")) {
			sscanf(line, "%s %lf", dummy, &(Params->MarginRad));
			continue;
		}
		if (KeyMatches(line, "MarginRadial ")) {
			sscanf(line, "%s %lf", dummy, &(Params->MarginRadial));
			continue;
		}
		if (KeyMatches(line, "EtaBinSize ")) {
			sscanf(line, "%s %lf", dummy, &(Params->EtaBinSize));
			continue;
		}
		if (KeyMatches(line, "OmeBinSize ")) {
			sscanf(line, "%s %lf", dummy, &(Params->OmeBinSize));
			continue;
		}
		if (KeyMatches(line, "MinMatchesToAcceptFrac ")) {
			sscanf(line, "%s %lf", dummy, &(Params->MinMatchesToAcceptFrac));
			continue;
		}
		if (KeyMatches(line, "ExcludePoleAngle ")) {
			sscanf(line, "%s %lf", dummy, &(Params->ExcludePoleAngle));
			continue;
		}
		if (KeyMatches(line, "RingRadii ")) {
			sscanf(line, "%s %lf", dummy, &(Params->RingRadiiUser[Params->NrOfRings]));
			Params->NrOfRings = Params->NrOfRings + 1;
			continue;
		}
		if (KeyMatches(line, "OmegaRange ")) {
			sscanf(line, "%s %lf %lf", dummy, &(Params->OmegaRanges[Params->NoOfOmegaRanges][0]),
				&(Params->OmegaRanges[Params->NoOfOmegaRanges][1]));
			(Params->NoOfOmegaRanges)++;
			continue;
		}
		if (KeyMatches(line, "BoxSize ")) {
			sscanf(line, "%s %lf %lf %lf %lf", dummy, &(Params->BoxSizes[NrOfBoxSizes][0]),
				&(Params->BoxSizes[NrOfBoxSizes][1]),
				&(Params->BoxSizes[NrOfBoxSizes][2]),
				&(Params->BoxSizes[NrOfBoxSizes][3]));
			NrOfBoxSizes++;
			continue;
		}
		if (KeyMatches(line, "SpotsFileName ")) {
			sscanf(line, "%s %s", dummy, Params->SpotsFileName);
			continue;
		}
		if (KeyMatches(line, "IDsFileName ")) {
			sscanf(line, "%s %s", dummy, Params->IDsFileName);
			continue;
		}
		if (KeyMatches(line, "MarginEta ")) {
			sscanf(line, "%s %lf", dummy, &(Params->MarginEta));
			continue;
		}
		if (KeyMatches(line, "MargABC ")) {
			sscanf(line, "%s %lf", dummy, &(Params->MargABC));
			continue;
		}
		if (KeyMatches(line, "MargABG ")) {
			sscanf(line, "%s %lf", dummy, &(Params->MargABG));
			continue;
		}
		if (KeyMatches(line, "UseFriedelPairs ")) {
			sscanf(line, "%s %d", dummy, &(Params->UseFriedelPairs));
			continue;
		}
		if (KeyMatches(line, "OutputFolder ")) {
			sscanf(line, "%s %s", dummy, Params->OutputFolder);
			continue;
		}
		if (KeyMatches(line, "ResultFolder ")) {
			sscanf(line, "%s %s", dummy, Params->ResultFolder);
			continue;
		}
		// Skip blank lines silently.
		if (line[0] == '\n' || line[0] == '\r' || line[0] == '\0') continue;
		// Anything else is an unrecognized keyword.
		printf("Warning: skipping line in parameters file:\n");
		printf("%s\n", line);
	}
	fclose(fp);
	// Build Params->RingRadii for internal use: the ring number is directly the
	// index into the array (RingRadii[5] = radius of ring 5); unused rings stay 0.
	int i;
	for (i = 0; i < MAX_N_RINGS; i++) {
		Params->RingRadii[i] = 0;
	}
	for (i = 0; i < Params->NrOfRings; i++) {
		Params->RingRadii[Params->RingNumbers[i]] = Params->RingRadiiUser[i];
	}
	return(0);
}
__device__ int FindRowInMatrix(RealType *aMatrixp, int nrows, int ncols, int SearchColumn, int aVal){
	// Linear scan down column `SearchColumn` of a row-major nrows x ncols
	// matrix; returns the first row whose entry equals aVal, or -1.
	int row = 0;
	while (row < nrows) {
		if (aMatrixp[row*ncols + SearchColumn] == aVal) return row;
		row++;
	}
	return -1;
}
__device__ RealType CalcEtaAngle(RealType y, RealType z) {
	// Azimuth of a detector point (degrees), measured from +z;
	// the sign is flipped for y > 0.
	RealType eta = rad2deg * acos(z/sqrt(y*y+z*z));
	return (y > 0) ? -eta : eta;
}
// Build the 3x3 rotation matrix R for a rotation of `angle` degrees about
// `axis` (Rodrigues form). A zero axis yields the identity matrix.
// Fix vs. original: the axis norm (sqrt) was recomputed three times; it is
// now computed once — identical values, one sqrt instead of three.
__device__ void AxisAngle2RotMatrix(RealType axis[3], RealType angle, RealType R[3][3]){
	if ( (axis[0] == 0) && (axis[1] == 0) && (axis[2] == 0) ) {
		R[0][0] = 1; R[0][1] = 0; R[0][2] = 0;
		R[1][0] = 0; R[1][1] = 1; R[1][2] = 0;
		R[2][0] = 0; R[2][1] = 0; R[2][2] = 1;
		return;
	}
	RealType invNorm = 1/sqrt(axis[0]*axis[0] + axis[1]*axis[1] + axis[2]*axis[2]);
	RealType u = axis[0]*invNorm;
	RealType v = axis[1]*invNorm;
	RealType w = axis[2]*invNorm;
	RealType angleRad = deg2rad * angle;
	RealType rcos = cos(angleRad);
	RealType rsin = sin(angleRad);
	R[0][0] = rcos + u*u*(1-rcos);
	R[1][0] = w * rsin + v*u*(1-rcos);
	R[2][0] = -v * rsin + w*u*(1-rcos);
	R[0][1] = -w * rsin + u*v*(1-rcos);
	R[1][1] = rcos + v*v*(1-rcos);
	R[2][1] = u * rsin + w*v*(1-rcos);
	R[0][2] = v * rsin + u*w*(1-rcos);
	R[1][2] = -u * rsin + v*w*(1-rcos);
	R[2][2] = rcos + w*w*(1-rcos);
	return;
}
// Smallest self-mapping rotation (degrees) about the plane normal for the
// hkl family of `RingNr`, derived from the space-group number.
// Fix vs. original: habs/kabs/labs were read uninitialized when RingNr had
// no entry in HKLints; they now default to 0 (nzeros==3 -> early return 0).
__device__ RealType CalcRotationAngle (int RingNr, int *HKLints, int *IntParamArr,
	RealType *RTParamArr){
	int habs = 0, kabs = 0, labs = 0;
	for (int i=0;i<MAX_N_HKLS;i++){
		if (HKLints[i*4+3] == RingNr){
			habs = abs(HKLints[i*4+0]);
			kabs = abs(HKLints[i*4+1]);
			labs = abs(HKLints[i*4+2]);
			break;
		}
	}
	int SGNum = IntParamArr[0];
	RealType ABCABG[6]; // a, b, c, alpha, beta, gamma at fixed RTParamArr offset
	for (int i=0;i<6;i++) ABCABG[i] = RTParamArr[13 + MAX_N_RINGS + i];
	int nzeros = 0;
	if (habs == 0) nzeros++;
	if (kabs == 0) nzeros++;
	if (labs == 0) nzeros++;
	if (nzeros == 3) return 0;
	if (SGNum == 1 || SGNum == 2){                 // triclinic
		return 360;
	}else if (SGNum >= 3 && SGNum <= 15){          // monoclinic
		if (nzeros != 2) return 360;
		else if (ABCABG[3] == 90 && ABCABG[4] == 90 && labs != 0){
			return 180;
		}else if (ABCABG[3] == 90 && ABCABG[5] == 90 && habs != 0){
			return 180;
		// NOTE(review): this branch repeats the alpha/gamma==90 test of the
		// previous one; beta/gamma (ABCABG[4] && ABCABG[5]) looks intended —
		// confirm against the reference implementation before changing.
		}else if (ABCABG[3] == 90 && ABCABG[5] == 90 && kabs != 0){
			return 180;
		}else return 360;
	}else if (SGNum >= 16 && SGNum <= 74){         // orthorhombic
		if (nzeros !=2) return 360;
		else return 180;
	}else if (SGNum >= 75 && SGNum <= 142){        // tetragonal
		if (nzeros == 0) return 360;
		else if (nzeros == 1 && labs == 0 && habs == kabs){
			return 180;
		}else if (nzeros == 2){
			if (labs == 0){
				return 180;
			}else{
				return 90;
			}
		}else return 360;
	}else if (SGNum >= 143 && SGNum <= 167){       // trigonal
		if (nzeros == 0) return 360;
		else if (nzeros == 2 && labs != 0) return 120;
		else return 360;
	}else if (SGNum >= 168 && SGNum <= 194){       // hexagonal
		if (nzeros == 2 && labs != 0) return 60;
		else return 360;
	}else if (SGNum >= 195 && SGNum <= 230){       // cubic
		if (nzeros == 2) return 90;
		else if (nzeros == 1){
			// NOTE(review): when nzeros==1 and no two indices are equal this
			// falls through to the final `return 0` — confirm that is intended.
			if (habs == kabs || kabs == labs || habs == labs) return 180;
		} else if (habs == kabs && kabs == labs) return 120;
		else return 360;
	}
	return 0;
}
__device__ void MatrixMultF33(RealType m[3][3], RealType n[3][3], RealType res[3][3]){
	// res = m * n (3x3 matrix product, row-major).
	for (int row = 0; row < 3; row++) {
		for (int col = 0; col < 3; col++) {
			res[row][col] = m[row][0]*n[0][col]
			              + m[row][1]*n[1][col]
			              + m[row][2]*n[2][col];
		}
	}
}
__device__ void MatrixMultF(RealType m[3][3], RealType v[3], RealType r[3]){
	// r = m * v (3x3 matrix times 3-vector), fully unrolled.
	r[0] = m[0][0]*v[0] + m[0][1]*v[1] + m[0][2]*v[2];
	r[1] = m[1][0]*v[0] + m[1][1]*v[1] + m[1][2]*v[2];
	r[2] = m[2][0]*v[0] + m[2][1]*v[1] + m[2][2]*v[2];
}
__device__ void RotateAroundZ(RealType v1[3], RealType alph, RealType v2[3]){
	// v2 = Rz(alph degrees) * v1: right-handed rotation about the z axis.
	RealType c = cos(alph*deg2rad);
	RealType s = sin(alph*deg2rad);
	RealType rot[3][3] = {{ c, -s, 0 },
	                      { s,  c, 0 },
	                      { 0,  0, 1 }};
	MatrixMultF(rot, v1, v2);
}
__device__ int CalcOmega(RealType x, RealType y, RealType z, RealType theta, RealType omegas[4], RealType etas[4]) {
	// Solve for the omega rotation(s), in degrees, that bring the vector
	// (x,y,z) into the diffraction condition for Bragg angle theta.
	// Writes up to 2 solutions and their eta angles; returns the count.
	int count = 0;
	RealType v = sin(theta*deg2rad)*sqrt(x*x + y*y + z*z);
	if ( fabs(y) < 1e-4 ) {
		// y ~ 0: the quadratic below degenerates; solve -x*cos(ome) = v directly.
		if (x != 0) {
			RealType cosome = -v/x;
			if (fabs(cosome) <= 1) {
				RealType ome = acos(cosome)*rad2deg;
				omegas[count] = ome;
				count = count + 1;
				omegas[count] = -ome;
				count = count + 1;
			}
		}
	} else {
		// Quadratic a*u^2 + b*u + c0 = 0 in u = cos(ome).
		RealType y2 = y*y;
		RealType a = 1 + ((x*x) / y2);
		RealType b = (2*v*x) / y2;
		RealType c0 = ((v*v) / y2) - 1;
		RealType discr = b*b - 4*a*c0;
		if (discr >= 0) {
			RealType root = sqrt(discr);
			RealType cands[2] = { (-b + root)/(2*a), (-b - root)/(2*a) };
			for (int k = 0; k < 2; k++) {
				RealType cosome = cands[k];
				if (fabs(cosome) > 1) continue;
				// acos is sign-ambiguous: keep whichever sign better
				// satisfies -x*cos(ome) + y*sin(ome) = v.
				RealType omePos = acos(cosome);
				RealType omeNeg = -omePos;
				RealType errPos = fabs(-x*cos(omePos) + y*sin(omePos) - v);
				RealType errNeg = fabs(-x*cos(omeNeg) + y*sin(omeNeg) - v);
				omegas[count] = ((errPos < errNeg) ? omePos : omeNeg)*rad2deg;
				count = count + 1;
			}
		}
	}
	// Eta of each solution: rotate g by omega, then take the azimuth.
	RealType gv[3] = {x, y, z};
	RealType gw[3];
	for (int s = 0; s < count; s++) {
		RotateAroundZ(gv, omegas[s], gw);
		etas[s] = CalcEtaAngle(gw[1], gw[2]);
	}
	return count;
}
__device__ int CalcDiffrSpots_Furnace(RealType OrientMatrix[3][3],
	RealType *RingRadii, RealType *OmeBoxArr, int NOmegaRanges,
	RealType ExcludePoleAngle, RealType *spots, RealType *hkls, int *n_arr){
	// Simulate the diffraction spots of a grain with the given orientation:
	// for every hkl row (7 values each), solve for the omegas, reject spots
	// near the poles or outside all omega/box windows, and append the rest
	// to `spots` (N_COL_THEORSPOTS values each: y, z, omega, ringNr, spotId).
	// Returns the number of spots written.
	int nSpots = 0;
	for (int row = 0; row < n_arr[1]; row++) {
		const RealType *rec = hkls + row*7;
		RealType gHkl[3] = { rec[0], rec[1], rec[2] };
		RealType Gc[3];
		MatrixMultF(OrientMatrix, gHkl, Gc);
		RealType omegas[4], etas[4];
		int nSol = CalcOmega(Gc[0], Gc[1], Gc[2], rec[5], omegas, etas);
		RealType spotId = (RealType)row*2 + 1;
		RealType radius = RingRadii[(int)rec[3]];
		for (int sol = 0; sol < nSol; sol++) {
			RealType eta = etas[sol];
			if ((fabs(eta) < ExcludePoleAngle) || ((180-fabs(eta)) < ExcludePoleAngle)) continue;
			RealType yl = -(sin(deg2rad*eta)*radius);
			RealType zl = cos(deg2rad*eta)*radius;
			// Keep the spot if it lands in any omega range's y/z/omega window.
			int keep = 0;
			for (int box = 0; box < NOmegaRanges && !keep; box++) {
				const RealType *w = OmeBoxArr + box*6;
				keep = (omegas[sol] > w[4]) && (omegas[sol] < w[5]) &&
				       (yl > w[0]) && (yl < w[1]) &&
				       (zl > w[2]) && (zl < w[3]);
			}
			if (keep) {
				RealType *out = spots + nSpots*N_COL_THEORSPOTS;
				out[0] = yl;
				out[1] = zl;
				out[2] = omegas[sol];
				out[3] = rec[3];
				out[4] = spotId;
				spotId++;
				nSpots++;
			}
		}
	}
	return nSpots;
}
__device__ int CalcOmegaStrains(
	RealType x,
	RealType y,
	RealType z,
	RealType theta,
	RealType omegas[4],
	RealType etas[4])
{
	// Same omega solve as CalcOmega: find the rotation(s), in degrees,
	// bringing (x,y,z) into the diffraction condition for Bragg angle theta.
	// Up to 2 solutions with their eta angles; returns the count.
	int count = 0;
	RealType len = sqrt(x*x + y*y + z*z);
	RealType v = sin(theta*deg2rad)*len;
	RealType almostzero = 1e-4;
	if ( fabs(y) < almostzero ) {
		// Degenerate case: solve -x*cos(ome) = v directly.
		if (x != 0) {
			RealType cosome1 = -v/x;
			if (fabs(cosome1) <= 1) {
				RealType ome = acos(cosome1)*rad2deg;
				omegas[count] = ome;
				count++;
				omegas[count] = -ome;
				count++;
			}
		}
	}
	else {
		// Quadratic a*u^2 + b*u + c = 0 in u = cos(ome).
		RealType y2 = y*y;
		RealType a = 1 + ((x*x) / y2);
		RealType b = (2*v*x) / y2;
		RealType c = ((v*v) / y2) - 1;
		RealType discr = b*b - 4*a*c;
		if (discr >= 0) {
			RealType cand[2] = { (-b + sqrt(discr))/(2*a), (-b - sqrt(discr))/(2*a) };
			for (int k = 0; k < 2; k++) {
				if (fabs(cand[k]) > 1) continue;
				// Resolve the acos sign ambiguity by checking the equation.
				RealType omeA = acos(cand[k]);
				RealType omeB = -omeA;
				RealType diffA = fabs((-x*cos(omeA) + y*sin(omeA)) - v);
				RealType diffB = fabs((-x*cos(omeB) + y*sin(omeB)) - v);
				omegas[count] = ((diffA < diffB) ? omeA : omeB)*rad2deg;
				count++;
			}
		}
	}
	// Eta for each solution.
	RealType gv[3] = {x, y, z};
	RealType gw[3];
	for (int indexOme = 0; indexOme < count; indexOme++) {
		RotateAroundZ(gv, omegas[indexOme], gw);
		etas[indexOme] = CalcEtaAngle(gw[1], gw[2]);
	}
	return(count);
}
// Returns more stuff needed for Fitting
// N_COL_THEORSPOTS is 8, so we can store everything we need.
__device__ int CalcDiffrSpots(RealType OrientMatrix[3][3],
	RealType *RingRadii, RealType *OmeBoxArr, int NOmegaRanges,
	RealType ExcludePoleAngle, RealType *spots, RealType *hkls, int *n_arr){
	// Like CalcDiffrSpots_Furnace, but each kept spot also carries the
	// rescaled G-vector, writing rows of 8: y, z, omega, Gx, Gy, Gz,
	// ringNr, spotId — everything the fitting objectives need.
	// Returns the number of spots written.
	int nSpots = 0;
	for (int row = 0; row < n_arr[1]; row++) {
		const RealType *rec = hkls + row*7;
		RealType gHkl[3] = { rec[0], rec[1], rec[2] };
		RealType Gc[3];
		MatrixMultF(OrientMatrix, gHkl, Gc);
		RealType omegas[4], etas[4];
		int nSol = CalcOmegaStrains(Gc[0], Gc[1], Gc[2], rec[5], omegas, etas);
		// Rescale Gc to length Ds (column 4, filled by CorrectHKLsLatCInd).
		RealType NGc = sqrt((Gc[0]*Gc[0])+(Gc[1]*Gc[1])+(Gc[2]*Gc[2]));
		RealType Ds = rec[4];
		RealType GCr[3] = { Ds*Gc[0]/NGc, Ds*Gc[1]/NGc, Ds*Gc[2]/NGc };
		RealType spotId = (RealType)row*2 + 1;
		RealType radius = RingRadii[(int)rec[3]];
		for (int sol = 0; sol < nSol; sol++) {
			RealType eta = etas[sol];
			if ((fabs(eta) < ExcludePoleAngle) || ((180-fabs(eta)) < ExcludePoleAngle)) continue;
			RealType yl = -(sin(deg2rad*eta)*radius);
			RealType zl = cos(deg2rad*eta)*radius;
			// Keep the spot if it lands in any omega range's y/z/omega window.
			int keep = 0;
			for (int box = 0; box < NOmegaRanges && !keep; box++) {
				const RealType *w = OmeBoxArr + box*6;
				keep = (omegas[sol] > w[4]) && (omegas[sol] < w[5]) &&
				       (yl > w[0]) && (yl < w[1]) &&
				       (zl > w[2]) && (zl < w[3]);
			}
			if (keep) {
				RealType *out = spots + nSpots*8;
				out[0] = yl;
				out[1] = zl;
				out[2] = omegas[sol];
				out[3] = GCr[0];
				out[4] = GCr[1];
				out[5] = GCr[2];
				out[6] = rec[3];
				out[7] = spotId;
				spotId++;
				nSpots++;
			}
		}
	}
	return nSpots;
}
__device__ int CalcDiffrSpotsStrained(RealType OrientMatrix[3][3],
	RealType *OmeBoxArr, int NOmegaRanges, RealType ExcludePoleAngle,
	RealType *spots, RealType *hkls, int *n_arr){
	// Variant of CalcDiffrSpots that takes the ring radius from the hkl table
	// itself (column 6, recomputed per trial lattice by CorrectHKLsLatCInd)
	// instead of a fixed RingRadii array. Output rows of 8:
	// y, z, omega, Gx, Gy, Gz, ringNr, spotId. Returns the spot count.
	int nSpots = 0;
	for (int row = 0; row < n_arr[1]; row++) {
		const RealType *rec = hkls + row*7;
		RealType gHkl[3] = { rec[0], rec[1], rec[2] };
		RealType Gc[3];
		MatrixMultF(OrientMatrix, gHkl, Gc);
		RealType omegas[4], etas[4];
		int nSol = CalcOmegaStrains(Gc[0], Gc[1], Gc[2], rec[5], omegas, etas);
		// Rescale Gc to length Ds (column 4).
		RealType NGc = sqrt((Gc[0]*Gc[0])+(Gc[1]*Gc[1])+(Gc[2]*Gc[2]));
		RealType Ds = rec[4];
		RealType GCr[3] = { Ds*Gc[0]/NGc, Ds*Gc[1]/NGc, Ds*Gc[2]/NGc };
		RealType spotId = (RealType)row*2 + 1;
		RealType radius = rec[6];
		for (int sol = 0; sol < nSol; sol++) {
			RealType eta = etas[sol];
			if ((fabs(eta) < ExcludePoleAngle) || ((180-fabs(eta)) < ExcludePoleAngle)) continue;
			RealType yl = -(sin(deg2rad*eta)*radius);
			RealType zl = cos(deg2rad*eta)*radius;
			// Keep the spot if it lands in any omega range's y/z/omega window.
			int keep = 0;
			for (int box = 0; box < NOmegaRanges && !keep; box++) {
				const RealType *w = OmeBoxArr + box*6;
				keep = (omegas[sol] > w[4]) && (omegas[sol] < w[5]) &&
				       (yl > w[0]) && (yl < w[1]) &&
				       (zl > w[2]) && (zl < w[3]);
			}
			if (keep) {
				RealType *out = spots + nSpots*8;
				out[0] = yl;
				out[1] = zl;
				out[2] = omegas[sol];
				out[3] = GCr[0];
				out[4] = GCr[1];
				out[5] = GCr[2];
				out[6] = rec[3];
				out[7] = spotId;
				spotId++;
				nSpots++;
			}
		}
	}
	return nSpots;
}
// Degree-argument trig wrappers.
// Fix vs. original: macro arguments are now parenthesized — the original
// expanded sind(x) to sin(deg2rad*x), which mis-binds for additive
// arguments such as sind(a + b). All existing call sites (single tokens or
// pure products like 2*Theta) expand to bit-identical values.
#define sind(x) sin(deg2rad*(x))
#define cosd(x) cos(deg2rad*(x))
#define tand(x) tan(deg2rad*(x))
#define asind(x) (rad2deg*asin(x))
#define acosd(x) (rad2deg*acos(x))
#define atand(x) (rad2deg*atan(x))
__device__ void CorrectHKLsLatCInd(RealType *LatC_d, RealType *hklsIn,
	int *n_arr, RealType *RTParamArr, RealType *hklscorr, int *HKLints_d){
	// Recompute, for the trial lattice constants in LatC_d (a,b,c,alpha,beta,
	// gamma), the cartesian G-vectors, d-spacings, Bragg thetas and ring radii
	// of every hkl row, writing the 7-column result into hklscorr.
	RealType a = LatC_d[0], b = LatC_d[1], c = LatC_d[2];
	RealType alph = LatC_d[3], bet = LatC_d[4], gamm = LatC_d[5];
	RealType SinA = sind(alph), SinB = sind(bet), SinG = sind(gamm);
	RealType CosA = cosd(alph), CosB = cosd(bet), CosG = cosd(gamm);
	// Reciprocal-cell angles and edge lengths.
	RealType GammaPr = acosd((CosA*CosB - CosG)/(SinA*SinB));
	RealType BetaPr = acosd((CosG*CosA - CosB)/(SinG*SinA));
	RealType SinBetaPr = sind(BetaPr);
	RealType Vol = (a*(b*(c*(SinA*(SinBetaPr*(SinG))))));
	RealType APr = b*c*SinA/Vol;
	RealType BPr = c*a*SinB/Vol;
	RealType CPr = a*b*SinG/Vol;
	// B matrix: maps integer (h,k,l) into cartesian reciprocal space.
	RealType B[3][3];
	B[0][0] = APr;
	B[0][1] = (BPr*cosd(GammaPr));
	B[0][2] = (CPr*cosd(BetaPr));
	B[1][0] = 0;
	B[1][1] = (BPr*sind(GammaPr));
	B[1][2] = (-CPr*SinBetaPr*CosA);
	B[2][0] = 0;
	B[2][1] = 0;
	B[2][2] = (CPr*SinBetaPr*SinA);
	// NOTE(review): presumably wavelength and sample-detector distance at
	// these fixed offsets — confirm against the RTParamArr packing.
	RealType wl = RTParamArr[5+MAX_N_RINGS+8+6];
	RealType Lsd = RTParamArr[0];
	for (int hklnr = 0; hklnr < n_arr[1]; hklnr++) {
		RealType ginit[3] = { (RealType) HKLints_d[hklnr*4+0],
		                      (RealType) HKLints_d[hklnr*4+1],
		                      (RealType) HKLints_d[hklnr*4+2] };
		RealType GCart[3];
		MatrixMultF(B, ginit, GCart);
		RealType Ds = 1/(sqrt((GCart[0]*GCart[0])+(GCart[1]*GCart[1])+(GCart[2]*GCart[2])));
		RealType Theta = (asind((wl)/(2*Ds)));
		hklscorr[hklnr*7+0] = GCart[0];
		hklscorr[hklnr*7+1] = GCart[1];
		hklscorr[hklnr*7+2] = GCart[2];
		hklscorr[hklnr*7+3] = hklsIn[hklnr*7+3];	// ring number carried over
		hklscorr[hklnr*7+4] = Ds;
		hklscorr[hklnr*7+5] = Theta;
		hklscorr[hklnr*7+6] = Lsd*(tand(2*Theta));	// ring radius on the detector
	}
}
__device__ void Euler2OrientMat(RealType Euler[3], RealType m_out[3][3]){
	// Orientation matrix from three Euler angles in degrees.
	// NOTE(review): looks like a ZXZ-style convention — confirm against callers.
	RealType cPsi = cosd(Euler[0]), sPsi = sind(Euler[0]);
	RealType cPhi = cosd(Euler[1]), sPhi = sind(Euler[1]);
	RealType cThe = cosd(Euler[2]), sThe = sind(Euler[2]);
	m_out[0][0] = cThe*cPsi - sThe*cPhi*sPsi;
	m_out[0][1] = -cThe*cPhi*sPsi - sThe*cPsi;
	m_out[0][2] = sPhi*sPsi;
	m_out[1][0] = cThe*sPsi + sThe*cPhi*cPsi;
	m_out[1][1] = cThe*cPhi*cPsi - sThe*sPsi;
	m_out[1][2] = -sPhi*cPsi;
	m_out[2][0] = sThe*sPhi;
	m_out[2][1] = cThe*sPhi;
	m_out[2][2] = cPhi;
}
void Euler2OrientMat_h(RealType Euler[3], RealType m_out[3][3]){
	// Host-side twin of the __device__ Euler2OrientMat; keep both in sync.
	RealType cPsi = cosd(Euler[0]), sPsi = sind(Euler[0]);
	RealType cPhi = cosd(Euler[1]), sPhi = sind(Euler[1]);
	RealType cThe = cosd(Euler[2]), sThe = sind(Euler[2]);
	m_out[0][0] = cThe*cPsi - sThe*cPhi*sPsi;
	m_out[0][1] = -cThe*cPhi*sPsi - sThe*cPsi;
	m_out[0][2] = sPhi*sPsi;
	m_out[1][0] = cThe*sPsi + sThe*cPhi*cPsi;
	m_out[1][1] = cThe*cPhi*cPsi - sThe*sPsi;
	m_out[1][2] = -sPhi*cPsi;
	m_out[2][0] = sThe*sPhi;
	m_out[2][1] = cThe*sPhi;
	m_out[2][2] = cPhi;
}
__device__ void DisplacementInTheSpot(RealType a, RealType b, RealType c,
	RealType xi, RealType yi, RealType zi, RealType omega, RealType wedge,
	RealType chi, RealType *Displ_y, RealType *Displ_z){
	// Detector-plane displacement (Displ_y, Displ_z) of a spot caused by the
	// scatterer sitting at (a,b,c) instead of the rotation-axis origin, for a
	// given omega, wedge and chi. (xi,yi,zi) is the nominal spot position.
	RealType sinOme = sind(omega);
	RealType cosOme = cosd(omega);
	// Rotate the scatterer position by omega about z.
	RealType xRot = a*cosOme - b*sinOme;
	RealType yRot = (a*sinOme) + (b*cosOme);
	RealType zRot = c;
	// Apply the wedge tilt.
	RealType wRad = deg2rad*wedge;
	RealType cosW = cos(wRad), sinW = sin(wRad);
	RealType xW = xRot*cosW - zRot*sinW;
	RealType yW = yRot;
	RealType zW = (xRot*sinW) + (zRot*cosW);
	// Apply the chi tilt.
	RealType cRad = deg2rad*chi;
	RealType cosC = cos(cRad), sinC = sin(cRad);
	RealType xC = xW;
	RealType yC = (cosC*yW) - (sinC*zW);
	RealType zC = (sinC*yW) + (cosC*zW);
	// Unit direction from the displaced scatterer to the nominal spot.
	RealType ik[3] = { xi - xC, yi - yC, zi - zC };
	RealType norm = sqrt((ik[0]*ik[0]) + (ik[1]*ik[1]) + (ik[2]*ik[2]));
	ik[0] = ik[0]/norm;
	ik[1] = ik[1]/norm;
	ik[2] = ik[2]/norm;
	// Intersect that ray with the detector plane (x = 0 offset from xC).
	*Displ_y = yC - ((xC*ik[1])/(ik[0]));
	*Displ_z = zC - ((xC*ik[2])/(ik[0]));
}
__device__
// Undo the wedge tilt for a measured spot: given detector position (yc,zc),
// sample-detector distance Lsd, nominal omega and wavelength wl, compute
// where the spot would fall (ysOut,zsOut) and its omega (OmegaOut) if the
// wedge angle were zero. Works by reconstructing the scattering vector,
// removing the wedge rotation, and re-solving for omega.
void CorrectForOme(RealType yc, RealType zc, RealType Lsd, RealType OmegaIni,
	RealType wl, RealType wedge, RealType *ysOut, RealType *zsOut, RealType *OmegaOut)
{
	// NOTE(review): deg2rad*rad2deg cancels to 1 in the next two lines; kept as-is.
	RealType SinTheta = sin(deg2rad*rad2deg*atan(sqrt((yc*yc)+(zc*zc))/Lsd)/2);
	RealType CosTheta = cos(deg2rad*rad2deg*atan(sqrt((yc*yc)+(zc*zc))/Lsd)/2);
	RealType ds = 2*SinTheta/wl;
	RealType CosW = cos(deg2rad*wedge);
	RealType SinW = sin(deg2rad*wedge);
	RealType SinEta = sin(deg2rad*CalcEtaAngle(yc,zc));
	RealType CosEta = cos(deg2rad*CalcEtaAngle(yc,zc));
	// Scattering vector k in the tilted (wedge) frame.
	RealType k1 = -ds*SinTheta;
	RealType k2 = -ds*CosTheta*SinEta;
	RealType k3 = ds*CosTheta*CosEta;
	// Exact-pole special cases.
	if (CalcEtaAngle(yc,zc) == 90){k3 = 0; k2 = -CosTheta;}
	else if (CalcEtaAngle(yc,zc) == -90) {k3 = 0; k2 = CosTheta;}
	// Rotate k by the wedge angle about y.
	RealType k1f = (k1*CosW) + (k3*SinW);
	RealType k2f = k2;
	RealType k3f = (k3*CosW) - (k1*SinW);
	// Rotate by the nominal omega to get the G-vector, then normalize to ds.
	RealType G1a = (k1f*cos(deg2rad*OmegaIni)) + (k2f*sin(deg2rad*OmegaIni));
	RealType G2a = (k2f*cos(deg2rad*OmegaIni)) - (k1f*sin(deg2rad*OmegaIni));
	RealType G3a = k3f;
	RealType LenGa = sqrt((G1a*G1a)+(G2a*G2a)+(G3a*G3a));
	RealType g1 = G1a*ds/LenGa;
	RealType g2 = G2a*ds/LenGa;
	RealType g3 = G3a*ds/LenGa;
	// From here on the wedge is removed: re-solve omega in the untilted frame.
	SinW = 0;
	CosW = 1;
	RealType LenG = sqrt((g1*g1)+(g2*g2)+(g3*g3));
	RealType k1i = -(LenG*LenG*wl)/2;
	RealType A = (k1i+(g3*SinW))/(CosW);
	// Two quadratics: one in sin(omega), one in cos(omega).
	RealType a_Sin = (g1*g1) + (g2*g2);
	RealType b_Sin = 2*A*g2;
	RealType c_Sin = (A*A) - (g1*g1);
	RealType a_Cos = a_Sin;
	RealType b_Cos = -2*A*g1;
	RealType c_Cos = (A*A) - (g2*g2);
	RealType Par_Sin = (b_Sin*b_Sin) - (4*a_Sin*c_Sin);
	RealType Par_Cos = (b_Cos*b_Cos) - (4*a_Cos*c_Cos);
	RealType P_check_Sin = 0;
	RealType P_check_Cos = 0;
	RealType P_Sin,P_Cos;
	// Negative discriminants are flagged and their roots zeroed below.
	if (Par_Sin >=0) P_Sin=sqrt(Par_Sin);
	else {P_Sin=0;P_check_Sin=1;}
	if (Par_Cos>=0) P_Cos=sqrt(Par_Cos);
	else {P_Cos=0;P_check_Cos=1;}
	RealType SinOmega1 = (-b_Sin-P_Sin)/(2*a_Sin);
	RealType SinOmega2 = (-b_Sin+P_Sin)/(2*a_Sin);
	RealType CosOmega1 = (-b_Cos-P_Cos)/(2*a_Cos);
	RealType CosOmega2 = (-b_Cos+P_Cos)/(2*a_Cos);
	// Clamp out-of-range roots to 0 (invalid sine/cosine values).
	if (SinOmega1 < -1) SinOmega1=0;
	else if (SinOmega1 > 1) SinOmega1=0;
	else if (SinOmega2 < -1) SinOmega2=0;
	else if (SinOmega2 > 1) SinOmega2=0;
	if (CosOmega1 < -1) CosOmega1=0;
	else if (CosOmega1 > 1) CosOmega1=0;
	else if (CosOmega2 < -1) CosOmega2=0;
	else if (CosOmega2 > 1) CosOmega2=0;
	if (P_check_Sin == 1){SinOmega1=0;SinOmega2=0;}
	if (P_check_Cos == 1){CosOmega1=0;CosOmega2=0;}
	// Pair the sin/cos roots by which combination best satisfies sin^2+cos^2=1.
	RealType Option1 = fabs((SinOmega1*SinOmega1)+(CosOmega1*CosOmega1)-1);
	RealType Option2 = fabs((SinOmega1*SinOmega1)+(CosOmega2*CosOmega2)-1);
	RealType Omega1, Omega2;
	if (Option1 < Option2){Omega1=rad2deg*atan2(SinOmega1,CosOmega1);Omega2=rad2deg*atan2(SinOmega2,CosOmega2);}
	else {Omega1=rad2deg*atan2(SinOmega1,CosOmega2);Omega2=rad2deg*atan2(SinOmega2,CosOmega1);}
	// Of the two candidate omegas, keep the one closer to the nominal value.
	RealType OmeDiff1 = fabs(Omega1-OmegaIni);
	RealType OmeDiff2 = fabs(Omega2-OmegaIni);
	RealType Omega;
	if (OmeDiff1 < OmeDiff2)Omega=Omega1;
	else Omega=Omega2;
	RealType SinOmega=sin(deg2rad*Omega);
	RealType CosOmega=cos(deg2rad*Omega);
	// NOTE(review): Fact is computed but never used — dead code, confirm and remove.
	RealType Fact = (g1*CosOmega) - (g2*SinOmega);
	// Project the corrected scattering vector back onto the detector.
	RealType Eta = CalcEtaAngle(k2,k3);
	RealType Sin_Eta = sin(deg2rad*Eta);
	RealType Cos_Eta = cos(deg2rad*Eta);
	*ysOut = -Lsd*tan(deg2rad*2*rad2deg*asin(wl*LenG/2))*Sin_Eta;
	*zsOut = Lsd*tan(deg2rad*2*rad2deg*asin(wl*LenG/2))*Cos_Eta;
	*OmegaOut = Omega;
}
__device__ void SpotToGv(RealType xi, RealType yi, RealType zi, RealType Omega,
	RealType theta, RealType *g1, RealType *g2, RealType *g3)
{
	// Convert a spot position (xi,yi,zi) at rotation angle Omega and Bragg
	// angle theta into the components of the normalized scattering vector g.
	RealType cosOme = cosd(Omega);
	RealType sinOme = sind(Omega);
	RealType eta = CalcEtaAngle(yi, zi);
	RealType tanEta = tand(-eta);
	RealType sinTheta = sind(theta);
	RealType cosTheta = cosd(theta);
	// Laboratory-frame k components (overridden at the exact poles).
	RealType k3 = sinTheta*(1+xi)/((yi*tanEta)+zi);
	RealType k2 = tanEta*k3;
	RealType k1 = -sinTheta;
	if (eta == 90){
		k3 = 0;
		k2 = -cosTheta;
	} else if (eta == -90){
		k3 = 0;
		k2 = cosTheta;
	}
	// Wedge is taken as zero in this transform.
	RealType cosW = 1;
	RealType sinW = 0;
	RealType k1f = (k1*cosW) + (k3*sinW);
	RealType k3f = (k3*cosW) - (k1*sinW);
	RealType k2f = k2;
	// Rotate by Omega about z.
	*g1 = (k1f*cosOme) + (k2f*sinOme);
	*g2 = (k2f*cosOme) - (k1f*sinOme);
	*g3 = k3f;
}
// Closure handed (as void*) to the pf_posIni objective by the minimizer.
struct func_data_pos_ini{
	int *IntParamArr;	// integer parameters; [1] = number of omega ranges
	RealType *OmeBoxArr;	// 6 values per omega range: y min/max, z min/max, omega min/max
	RealType *spotsYZO;	// observed spots, 9 values each (omega@4, y@5, z@6, matched id@8)
	RealType *hkls;	// reference hkl table, 7 values per row
	int *HKLInts;	// integer h,k,l,ringNr per row (4 values each)
	int nMatched;	// number of matched observed spots in spotsYZO
	RealType *RTParamArr;	// real-valued parameter array (fixed offsets, see pf_posIni)
	int *n_arr;	// counts; [1] = number of hkl rows
	RealType *TheorSpots;	// scratch buffer for simulated spots (8 values each)
	RealType *hklspace;	// scratch buffer for lattice-corrected hkls (7 values each)
};
// Objective for combined position + orientation + lattice refinement.
// x layout: [0..2] grain position (fed to DisplacementInTheSpot as a,b,c),
// [3..5] Euler angles (degrees), [6..11] lattice constants (x+6 is handed
// to CorrectHKLsLatCInd). Returns the summed detector-plane distance
// between matched observed and simulated spots (smaller is better).
// Fix vs. original: removed the unused local pointer SpotsCorrected.
__device__ RealType pf_posIni(int n, RealType *x, void *f_data_trial){
	struct func_data_pos_ini *f_data = (struct func_data_pos_ini *) f_data_trial;
	RealType *TheorSpots, *spotsYZO, *RTParamArr, *OmeBoxArr, *hkls, *hklscorr;
	OmeBoxArr = &(f_data->OmeBoxArr[0]);
	spotsYZO = &(f_data->spotsYZO[0]);
	hkls = &(f_data->hkls[0]);
	RTParamArr = &(f_data->RTParamArr[0]);
	TheorSpots = &(f_data->TheorSpots[0]);
	int nMatched = f_data->nMatched;
	int *IntParamArr, *HKLInts, *n_arr;
	IntParamArr = &(f_data->IntParamArr[0]);
	HKLInts = &(f_data->HKLInts[0]);
	n_arr = &(f_data->n_arr[0]);
	hklscorr = &(f_data->hklspace[0]);
	// Refresh the hkl table for the trial lattice, then simulate spots.
	CorrectHKLsLatCInd(x+6, hkls, n_arr, RTParamArr, hklscorr, HKLInts);
	RealType OrientMatrix[3][3];
	Euler2OrientMat(x+3, OrientMatrix);
	RealType DisplY, DisplZ, Y, Z, Ome;
	int spnr;
	RealType Error = 0;
	int nTspots = CalcDiffrSpots(OrientMatrix, RTParamArr+5, OmeBoxArr, IntParamArr[1],
		RTParamArr[5+MAX_N_RINGS+6], TheorSpots, hklscorr, n_arr);
	for (int nrSp = 0; nrSp < nMatched; nrSp++){
		// Detector shift caused by the trial grain position (chi = 0;
		// RTParamArr[20+MAX_N_RINGS] is the wedge angle).
		DisplacementInTheSpot(x[0], x[1], x[2], RTParamArr[0], spotsYZO[nrSp*9+5],
			spotsYZO[nrSp*9+6], spotsYZO[nrSp*9+4], RTParamArr[20+MAX_N_RINGS]
			, 0, &DisplY, &DisplZ);
		if (fabs(RTParamArr[20+MAX_N_RINGS]) > 0.02){
			// Non-negligible wedge: re-solve omega for the corrected spot.
			CorrectForOme(spotsYZO[nrSp*9+5]-DisplY,
				spotsYZO[nrSp*9+6]-DisplZ, RTParamArr[0],
				spotsYZO[nrSp*9+4], RTParamArr[19+MAX_N_RINGS],
				RTParamArr[20+MAX_N_RINGS], &Y, &Z, &Ome);
		}else{
			Y = spotsYZO[nrSp*9+5]-DisplY;
			Z = spotsYZO[nrSp*9+6]-DisplZ;
			Ome = spotsYZO[nrSp*9+4];
		}
		spnr = (int) spotsYZO[nrSp*9+8];	// matched theoretical spot id
		for (int j = 0; j < nTspots; j++){
			if ((int)TheorSpots[j*8+7] == spnr){
				Error += CalcNorm2(Y-TheorSpots[j*8+0], Z-TheorSpots[j*8+1]);
				break;
			}
		}
	}
	return Error;
}
// Closure handed (as void*) to the pf_orient objective by the minimizer.
struct func_data_orient{
	int *IntParamArr;	// integer parameters; [1] = number of omega ranges
	RealType *OmeBoxArr;	// 6 values per omega range: y min/max, z min/max, omega min/max
	RealType *spotsCorrected;	// corrected observed spots, 6 values each (g-vector@2..4, matched id@5)
	RealType *hkls;	// reference hkl table, 7 values per row
	int *HKLInts;	// integer h,k,l,ringNr per row (4 values each)
	int nMatched;	// number of matched observed spots
	RealType *RTParamArr;	// real-valued parameter array (fixed offsets, see pf_orient)
	int *n_arr;	// counts; [1] = number of hkl rows
	RealType *TheorSpots;	// scratch buffer for simulated spots (8 values each)
	RealType *hklspace;	// scratch buffer for lattice-corrected hkls (7 values each)
};
// Objective for orientation refinement.
// x layout: [0..2] Euler angles (degrees), [3..8] lattice constants (x+3 is
// handed to CorrectHKLsLatCInd). Returns the summed misorientation angle
// (degrees) between observed and simulated g-vectors of matched spots.
// Fix vs. original: removed the unused local pointer SpotsCorrected.
__device__ RealType pf_orient(int n, RealType *x, void *f_data_trial){
	struct func_data_orient *f_data = (struct func_data_orient *) f_data_trial;
	RealType *TheorSpots, *spotsYZO, *RTParamArr, *OmeBoxArr, *hkls, *hklscorr;
	OmeBoxArr = &(f_data->OmeBoxArr[0]);
	spotsYZO = &(f_data->spotsCorrected[0]);
	hkls = &(f_data->hkls[0]);
	RTParamArr = &(f_data->RTParamArr[0]);
	TheorSpots = &(f_data->TheorSpots[0]);
	int nMatched = f_data->nMatched;
	int *IntParamArr, *HKLInts, *n_arr;
	IntParamArr = &(f_data->IntParamArr[0]);
	HKLInts = &(f_data->HKLInts[0]);
	n_arr = &(f_data->n_arr[0]);
	hklscorr = &(f_data->hklspace[0]);
	// Refresh the hkl table for the trial lattice, then simulate spots.
	CorrectHKLsLatCInd(x+3, hkls, n_arr, RTParamArr, hklscorr, HKLInts);
	RealType OrientMatrix[3][3];
	Euler2OrientMat(x, OrientMatrix);
	RealType *gObs, *gTh;
	int spnr;
	RealType Error = 0;
	RealType tmpL;
	int nTspots = CalcDiffrSpots(OrientMatrix, RTParamArr+5, OmeBoxArr, IntParamArr[1],
		RTParamArr[5+MAX_N_RINGS+6], TheorSpots, hklscorr, n_arr);
	for (int nrSp = 0; nrSp < nMatched; nrSp++){
		gObs = spotsYZO + nrSp*6 + 2;	// observed g-vector (3 components)
		spnr = (int) spotsYZO[nrSp*6+5];	// matched theoretical spot id
		for (int j = 0; j < nTspots; j++){
			if ((int)TheorSpots[j*8+7] == spnr){
				gTh = TheorSpots + j*8 + 3;
				// Cosine of the angle between the two g-vectors, clamped
				// to [-1, 1] against rounding before acos.
				tmpL = ((dot(gObs,gTh))/(CalcNorm3(gObs[0],gObs[1],gObs[2])*CalcNorm3(gTh[0],gTh[1],gTh[2])));
				if (tmpL > 1) tmpL = 1;
				if (tmpL < -1) tmpL = -1;
				Error += fabs(acosd(tmpL));
				break;
			}
		}
	}
	return Error;
}
// Parameter bundle passed (as void*) to pf_strains via the Nelder-Mead driver.
// All pointers reference preallocated device arrays owned by the caller.
struct func_data_strains{
	int *IntParamArr;        // integer algorithm parameters
	RealType *OmeBoxArr;     // omega-box limits for spot simulation
	RealType *spotsCorrected; // 6 values per matched spot: Y, Z, g[3], spot nr
	RealType *hkls;          // input hkl table (uncorrected)
	int *HKLInts;            // integer hkl data for CorrectHKLsLatCInd
	int nMatched;            // number of matched (observed) spots
	RealType *RTParamArr;    // real-valued algorithm parameters
	int *n_arr;              // size array (n_arr[1] = max hkl rows)
	RealType *Euler;         // fixed orientation (Euler angles) during strain fit
	RealType *TheorSpots;    // scratch: simulated spots, 8 values per spot
	RealType *hklspace;      // scratch: lattice-corrected hkls
};
__device__ RealType pf_strains(int n, RealType *x, void *f_data_trial){
	// Nelder-Mead objective for lattice-parameter (strain) refinement at a
	// fixed orientation (f_data->Euler). x[0..5] are the trial lattice
	// parameters. Returns the summed Y/Z distance between each observed spot
	// and its matching simulated (strained) spot.
	struct func_data_strains *f_data = (struct func_data_strains *) f_data_trial;
	// NOTE(review): removed an unused local pointer (SpotsCorrected) that was
	// declared but never referenced in the original.
	RealType *TheorSpots, *spotsYZO, *RTParamArr, *OmeBoxArr, *hkls,
		*hklscorr, *Euler;
	OmeBoxArr = &(f_data->OmeBoxArr[0]);
	spotsYZO = &(f_data->spotsCorrected[0]);
	hkls = &(f_data->hkls[0]);
	RTParamArr = &(f_data->RTParamArr[0]);
	TheorSpots = &(f_data->TheorSpots[0]);
	int nMatched = f_data->nMatched;
	int *IntParamArr,*HKLInts,*n_arr;
	IntParamArr = &(f_data->IntParamArr[0]);
	HKLInts = &(f_data->HKLInts[0]);
	n_arr = &(f_data->n_arr[0]);
	hklscorr = &(f_data->hklspace[0]);
	// Re-correct the hkls for the trial lattice parameters before simulating.
	CorrectHKLsLatCInd(x,hkls,n_arr,RTParamArr,hklscorr,HKLInts);
	RealType OrientMatrix[3][3];
	Euler = &(f_data->Euler[0]);
	Euler2OrientMat(Euler,OrientMatrix);
	RealType Y,Z;
	int spnr;
	RealType Error = 0;
	int nTspots = CalcDiffrSpotsStrained(OrientMatrix,OmeBoxArr,IntParamArr[1],
		RTParamArr[5+MAX_N_RINGS+6],TheorSpots,hklscorr,n_arr);
	for (int nrSp=0;nrSp<nMatched;nrSp++){
		Y = spotsYZO[nrSp*6+0];
		Z = spotsYZO[nrSp*6+1];
		spnr = (int) spotsYZO[nrSp*6+5];     // spot id used for matching
		for (int j=0;j<nTspots;j++){
			if ((int)TheorSpots[j*8+7] == spnr){
				Error += CalcNorm2(Y-TheorSpots[j*8+0],Z-TheorSpots[j*8+1]);
				break;
			}
		}
	}
	return Error;
}
// Parameter bundle passed (as void*) to pf_posSec via the Nelder-Mead driver.
// Unlike the other bundles, the theoretical spots are precomputed by the
// caller (nTspots entries), so the objective only re-applies position shifts.
struct func_data_pos_sec{
	int nMatched;            // number of matched (observed) spots
	RealType *TheorSpots;    // precomputed simulated spots, 8 values per spot
	int nTspots;             // number of valid rows in TheorSpots
	RealType *spotsYZO;      // observed spots, 9 values per spot
	RealType *RTParamArr;    // real-valued algorithm parameters
};
__device__ RealType pf_posSec(int n, RealType *x, void *f_data_trial){
	// Nelder-Mead objective for the second (final) position refinement.
	// x[0..2] is the trial grain position. Returns the summed Y/Z distance
	// between each position-corrected observed spot and its matching
	// precomputed theoretical spot.
	struct func_data_pos_sec *f_data = (struct func_data_pos_sec *) f_data_trial;
	RealType *TheorSpots, *spotsYZO, *RTParamArr;
	spotsYZO = &(f_data->spotsYZO[0]);
	TheorSpots = &(f_data->TheorSpots[0]);
	RTParamArr = &(f_data->RTParamArr[0]);
	int nMatched = f_data->nMatched;
	int nTspots = f_data->nTspots;
	int spnr;
	// BUG FIX: the accumulator was declared `int`, truncating every
	// CalcNorm2 (RealType) contribution. Declared RealType to match the
	// sibling objectives pf_posIni / pf_orient / pf_strains.
	RealType Error = 0;
	RealType DisplY, DisplZ, Y, Z, Ome;
	for (int nrSp=0;nrSp<nMatched;nrSp++){
		DisplacementInTheSpot(x[0],x[1],x[2],RTParamArr[0],spotsYZO[nrSp*9+5],
			spotsYZO[nrSp*9+6],spotsYZO[nrSp*9+4],RTParamArr[20+MAX_N_RINGS]
			,0,&DisplY,&DisplZ);
		if (fabs(RTParamArr[20+MAX_N_RINGS]) > 0.02){
			CorrectForOme(spotsYZO[nrSp*9+5]-DisplY,
				spotsYZO[nrSp*9+6]-DisplZ,RTParamArr[0],
				spotsYZO[nrSp*9+4],RTParamArr[19+MAX_N_RINGS],
				RTParamArr[20+MAX_N_RINGS],&Y, &Z, &Ome);
		}else{
			Y = spotsYZO[nrSp*9+5]-DisplY;
			Z = spotsYZO[nrSp*9+6]-DisplZ;
			Ome = spotsYZO[nrSp*9+4]; // parity with pf_posIni (Ome is unused below)
		}
		spnr = (int) spotsYZO[nrSp*9+8];     // spot id used for matching
		for (int j=0;j<nTspots;j++){
			if ((int)TheorSpots[j*8+7] == spnr){
				Error += CalcNorm2(Y-TheorSpots[j*8+0],Z-TheorSpots[j*8+1]);
				break;
			}
		}
	}
	return Error;
}
// One thread per grain/spot group: four-stage grain refinement using the
// nelmin (Nelder-Mead) driver.
//   Stage 1: refine position (12-parameter start, only x[0..2] used by pf_posIni).
//   Stage 2: refine orientation + lattice (9 params) on position-corrected spots.
//   Stage 3: refine lattice parameters (6 params) at the fixed orientation.
//   Stage 4: re-refine position (3 params) against strained theoretical spots.
// All *_d arguments are preallocated per-thread device scratch arrays;
// Result_d receives 12 values per grain: position, Euler angles, lattice.
__global__ void FitGrain(RealType *RTParamArr, int *IntParamArr,
	int *n_arr, RealType *OmeBoxArr, RealType *hklsIn, int *HKLints,
	int *nMatchedArr, RealType *spotsYZO_d, RealType *FitParams_d,
	RealType *TheorSpots_d, RealType *scratch_d, RealType *hklspace_d,
	RealType *x_d, RealType *xl_d, RealType *xu_d, RealType *xout_d,
	RealType *xstep_d, RealType *CorrectSpots, RealType *TheorSpotsCorr,
	RealType *Result_d){
	int spotNr = blockIdx.x * blockDim.x + threadIdx.x;
	if (spotNr >= n_arr[2]){
		// n_arr[2] = number of grains to fit; excess threads exit.
		return;
	}
	RealType *spotsYZO, *FitParams, *TheorSpots, *scratch, *hklspace, *x,
		*xl, *xu, *xout, *xstep, *spotsCorrected, *TheorSpotsCorrected,
		*Result;
	int nMatched, nMatchedTillNowRowNr, i;
	nMatched = nMatchedArr[spotNr*3+0];
	nMatchedTillNowRowNr = nMatchedArr[spotNr*3+2];
	// Slice the per-thread views out of the shared scratch allocations.
	spotsYZO = spotsYZO_d + nMatchedTillNowRowNr * 9;
	FitParams = FitParams_d + spotNr * 12;
	Result = Result_d + spotNr *12;
	TheorSpots = TheorSpots_d + n_arr[1]*2*spotNr*8;
	TheorSpotsCorrected = TheorSpotsCorr + n_arr[1]*2*spotNr*8;
	scratch = scratch_d + spotNr*((12+1)*(12+1)+3*12);
	hklspace = hklspace_d + spotNr*n_arr[1]*7;
	spotsCorrected = CorrectSpots + nMatchedTillNowRowNr*6;
	x = x_d + 12*spotNr;
	xl = xl_d + 12*spotNr;
	xu = xu_d + 12*spotNr;
	xout = xout_d + 12*spotNr;
	xstep = xstep_d + 12*spotNr;
	// --- Stage 1: position fit over all 12 parameters ---
	int n = 12;
	for (i=0;i<12;i++){
		x[i] = FitParams[i];
	}
	// Bounds: position +- RTParamArr[1], Euler +- 0.01, lattice +- percent tolerances.
	for (i=0;i<3;i++){
		xl[i] = x[i] - RTParamArr[1];
		xl[i+3] = x[i+3] - 0.01;
		xl[i+6] = x[i+6]*(1 - RTParamArr[21+MAX_N_RINGS]/100);
		xl[i+9] = x[i+9]*(1 - RTParamArr[22+MAX_N_RINGS]/100);
		xu[i] = x[i] + RTParamArr[1];
		xu[i+3] = x[i+3] + 0.01;
		xu[i+6] = x[i+6]*(1 + RTParamArr[21+MAX_N_RINGS]/100);
		xu[i+9] = x[i+9]*(1 + RTParamArr[22+MAX_N_RINGS]/100);
	}
	for (i=0;i<n;i++){
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_pos_ini f_data;
	f_data.HKLInts = HKLints;
	f_data.IntParamArr = IntParamArr;
	f_data.OmeBoxArr = OmeBoxArr;
	f_data.RTParamArr = RTParamArr;
	f_data.hkls = hklsIn;
	f_data.nMatched = nMatched;
	f_data.n_arr = n_arr;
	f_data.spotsYZO = spotsYZO;
	f_data.TheorSpots = TheorSpots;
	f_data.hklspace = hklspace;
	struct func_data_pos_ini *f_datat;
	f_datat = &f_data;
	void *trp = (struct func_data_pos_ini *) f_datat;
	RealType minf;
	RealType reqmin = 1e-8;
	int konvge = 10;
	int kcount = MAX_N_EVALS;
	int icount, numres, ifault;
	//if (spotNr == 0) printf("Pos in: %lf %lf %lf %lf\n",pf_posIni(n,x,trp),x[0],x[1],x[2]);
	nelmin(pf_posIni, n, x, xout, xl, xu, scratch, &minf, reqmin, xstep, konvge, kcount/4, &icount, &numres, &ifault, trp);
	//if (spotNr == 0) printf("Pos out: %lf %lf %lf %lf\n",pf_posIni(n,xout,trp),xout[0],xout[1],xout[2]);
	//if (ifault !=0) printf("Not optimized completely.\n");
	RealType Pos[3] = {xout[0],xout[1],xout[2]};
	// Apply the fitted position to the observed spots and compute their
	// g-vectors; results go into spotsCorrected (6 values per spot).
	// NOTE(review): g[3] below is declared but never used — candidate for removal.
	RealType DisplY, DisplZ, Y, Z, Ome, g[3], Theta, lenK;
	for (int nrSp=0;nrSp<nMatched;nrSp++){
		DisplacementInTheSpot(xout[0],xout[1],xout[2],RTParamArr[0],spotsYZO[nrSp*9+5],
			spotsYZO[nrSp*9+6],spotsYZO[nrSp*9+4],RTParamArr[20+MAX_N_RINGS],
			0,&DisplY,&DisplZ);
		if (fabs(RTParamArr[20+MAX_N_RINGS]) > 0.02){
			// Large wedge angle: full omega correction.
			CorrectForOme(spotsYZO[nrSp*9+5]-DisplY,
				spotsYZO[nrSp*9+6]-DisplZ,RTParamArr[0],
				spotsYZO[nrSp*9+4],RTParamArr[19+MAX_N_RINGS],
				RTParamArr[20+MAX_N_RINGS],&Y, &Z, &Ome);
		}else{
			Y = spotsYZO[nrSp*9+5]-DisplY;
			Z = spotsYZO[nrSp*9+6]-DisplZ;
			Ome = spotsYZO[nrSp*9+4];
		}
		Theta = atand(CalcNorm2(Y,Z)/RTParamArr[0])/2;
		lenK = CalcNorm3(RTParamArr[0],Y,Z);
		SpotToGv(RTParamArr[0]/lenK,Y/lenK,Z/lenK,Ome,Theta,&spotsCorrected[nrSp*6+2],
			&spotsCorrected[nrSp*6+3],&spotsCorrected[nrSp*6+4]);
		spotsCorrected[nrSp*6+0] = Y;
		spotsCorrected[nrSp*6+1] = Z;
		spotsCorrected[nrSp*6+5] = spotsYZO[nrSp*9+8];
	}
	// --- Stage 2: orientation fit (Euler angles + lattice, 9 params) ---
	n = 9;
	for (i=0;i<9;i++){
		x[i] = FitParams[i+3];
	}
	for (i=0;i<3;i++){
		xl[i] = x[i] - 2;
		xl[i+3] = x[i+3]*(1 - RTParamArr[21+MAX_N_RINGS]/100);
		xl[i+6] = x[i+6]*(1 - RTParamArr[22+MAX_N_RINGS]/100);
		xu[i] = x[i] + 2;
		xu[i+3] = x[i+3]*(1 + RTParamArr[21+MAX_N_RINGS]/100);
		xu[i+6] = x[i+6]*(1 + RTParamArr[22+MAX_N_RINGS]/100);
	}
	for (i=0;i<n;i++){
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_orient f_data2;
	f_data2.HKLInts = HKLints;
	f_data2.IntParamArr = IntParamArr;
	f_data2.OmeBoxArr = OmeBoxArr;
	f_data2.RTParamArr = RTParamArr;
	f_data2.hkls = hklsIn;
	f_data2.nMatched = nMatched;
	f_data2.n_arr = n_arr;
	f_data2.spotsCorrected = spotsCorrected;
	f_data2.TheorSpots = TheorSpots;
	f_data2.hklspace = hklspace;
	struct func_data_orient *f_datat2;
	f_datat2 = &f_data2;
	void *trp2 = (struct func_data_orient *) f_datat2;
	//if (spotNr == 0) printf("Orient in: %lf %lf %lf %lf\n",pf_orient(n,x,trp2),x[0],x[1],x[2]);
	nelmin(pf_orient, n, x, xout, xl, xu, scratch, &minf, reqmin, xstep, konvge, kcount/3, &icount, &numres, &ifault, trp2);
	//if (spotNr == 0) printf("Orient out: %lf %lf %lf %lf\n",pf_orient(n,xout,trp2),xout[0],xout[1],xout[2]);
	//if (ifault !=0) printf("Not optimized completely.\n");
	RealType Euler[3] = {xout[0],xout[1],xout[2]};
	// --- Stage 3: lattice-parameter (strain) fit, 6 params ---
	n = 6;
	for (i=0;i<n;i++){
		x[i] = FitParams[i+6];
	}
	for (i=0;i<3;i++){
		xl[i] = x[i]*(1 - RTParamArr[21+MAX_N_RINGS]/100);
		xl[i+3] = x[i+3]*(1 - RTParamArr[22+MAX_N_RINGS]/100);
		xu[i] = x[i]*(1 + RTParamArr[21+MAX_N_RINGS]/100);
		xu[i+3] = x[i+3]*(1 + RTParamArr[22+MAX_N_RINGS]/100);
	}
	for (i=0;i<n;i++){
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_strains f_data3;
	f_data3.Euler = Euler;
	f_data3.HKLInts = HKLints;
	f_data3.IntParamArr = IntParamArr;
	f_data3.OmeBoxArr = OmeBoxArr;
	f_data3.RTParamArr = RTParamArr;
	f_data3.hkls = hklsIn;
	f_data3.nMatched = nMatched;
	f_data3.n_arr = n_arr;
	f_data3.spotsCorrected = spotsCorrected;
	f_data3.TheorSpots = TheorSpots;
	f_data3.hklspace = hklspace;
	struct func_data_strains *f_datat3;
	f_datat3 = &f_data3;
	void *trp3 = (struct func_data_strains *) f_datat3;
	//if (spotNr == 0) printf("Strains in: %lf %lf %lf %lf %lf %lf %lf\n",pf_strains(n,x,trp3),x[0],x[1],x[2],x[3],x[4],x[5]);
	nelmin(pf_strains, n, x, xout, xl, xu, scratch, &minf, reqmin, xstep, konvge, kcount/2, &icount, &numres, &ifault, trp3);
	//if (spotNr == 0) printf("Strains out: %lf %lf %lf %lf %lf %lf %lf\n",pf_strains(n,xout,trp3),xout[0],xout[1],xout[2],xout[3],xout[4],xout[5]);
	//if (ifault !=0) printf("Not optimized completely.\n");
	RealType LatCFit[6] = {xout[0],xout[1],xout[2],xout[3],xout[4],xout[5]};
	// --- Stage 4: final position fit against strained theoretical spots ---
	n = 3;
	RealType OM[3][3];
	Euler2OrientMat(Euler,OM);
	CorrectHKLsLatCInd(LatCFit,hklsIn,n_arr,RTParamArr,hklspace,HKLints);
	int nTspots = CalcDiffrSpotsStrained(OM,OmeBoxArr,IntParamArr[1],
		RTParamArr[5+MAX_N_RINGS+6],TheorSpotsCorrected,hklspace,n_arr);
	for (int i=0;i<3;i++){
		x[i] = Pos[i];
		xl[i] = x[i] - RTParamArr[1];
		xu[i] = x[i] + RTParamArr[1];
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_pos_sec f_data4;
	f_data4.RTParamArr = RTParamArr;
	f_data4.nMatched = nMatched;
	// NOTE(review): nTspots was counted into TheorSpotsCorrected just above,
	// but the objective is given TheorSpots (filled during the strains
	// stage). Confirm whether f_data4.TheorSpots should be
	// TheorSpotsCorrected instead.
	f_data4.TheorSpots = TheorSpots;
	f_data4.nTspots = nTspots;
	f_data4.spotsYZO = spotsYZO;
	struct func_data_pos_sec *f_datat4;
	f_datat4 = &f_data4;
	void *trp4 = (struct func_data_pos_sec *) f_datat4;
	//if (spotNr == 0) printf("Pos2 in: %lf %lf %lf %lf\n",pf_posSec(n,x,trp4),x[0],x[1],x[2]);
	nelmin(pf_posSec, n, x, xout, xl, xu, scratch, &minf, reqmin, xstep, konvge, kcount, &icount, &numres, &ifault, trp4);
	//if (spotNr == 0) printf("Pos2 out: %lf %lf %lf %lf\n",pf_posSec(n,xout,trp4),xout[0],xout[1],xout[2]);
	//if (ifault !=0) printf("Not optimized completely.\n");
	RealType Pos2[3] = {xout[0],xout[1],xout[2]};
	// Pack the final result: position, Euler angles, 6 lattice parameters.
	for (i=0;i<3;i++){
		Result[i] = Pos2[i];
		Result[i+3] = Euler[i];
		Result[i+6] = LatCFit[i];
		Result[i+9] = LatCFit[i+3];
	}
}
// NLOPT-driver variant of FitGrain: same four-stage refinement (position,
// orientation, strain, position) but minimized with nldrmd_minimize and an
// nlopt_stopping criterion instead of nelmin. Interface and per-thread
// scratch layout are identical to FitGrain.
__global__ void FitGrain_NLOPT(RealType *RTParamArr, int *IntParamArr,
	int *n_arr, RealType *OmeBoxArr, RealType *hklsIn, int *HKLints,
	int *nMatchedArr, RealType *spotsYZO_d, RealType *FitParams_d,
	RealType *TheorSpots_d, RealType *scratch_d, RealType *hklspace_d,
	RealType *x_d, RealType *xl_d, RealType *xu_d, RealType *xout_d,
	RealType *xstep_d, RealType *CorrectSpots, RealType *TheorSpotsCorr,
	RealType *Result_d){
	int spotNr = blockIdx.x * blockDim.x + threadIdx.x;
	if (spotNr >= n_arr[2]){
		// n_arr[2] = number of grains to fit; excess threads exit.
		return;
	}
	RealType *spotsYZO, *FitParams, *TheorSpots, *scratch, *hklspace, *x,
		*xl, *xu, *xout, *xstep, *spotsCorrected, *TheorSpotsCorrected,
		*Result;
	int nMatched, nMatchedTillNowRowNr, i;
	nMatched = nMatchedArr[spotNr*3+0];
	nMatchedTillNowRowNr = nMatchedArr[spotNr*3+2];
	// Slice the per-thread views out of the shared scratch allocations.
	spotsYZO = spotsYZO_d + nMatchedTillNowRowNr * 9;
	FitParams = FitParams_d + spotNr * 12;
	Result = Result_d + spotNr *12;
	TheorSpots = TheorSpots_d + n_arr[1]*2*spotNr*8;
	TheorSpotsCorrected = TheorSpotsCorr + n_arr[1]*2*spotNr*8;
	scratch = scratch_d + spotNr*((12+1)*(12+1)+3*12);
	hklspace = hklspace_d + spotNr*n_arr[1]*7;
	spotsCorrected = CorrectSpots + nMatchedTillNowRowNr*6;
	x = x_d + 12*spotNr;
	xl = xl_d + 12*spotNr;
	xu = xu_d + 12*spotNr;
	xout = xout_d + 12*spotNr;
	xstep = xstep_d + 12*spotNr;
	// --- Stage 1: position fit over all 12 parameters ---
	int n = 12;
	for (i=0;i<12;i++){
		x[i] = FitParams[i];
	}
	// Bounds: position +- RTParamArr[1], Euler +- 0.01, lattice +- percent tolerances.
	for (i=0;i<3;i++){
		xl[i] = x[i] - RTParamArr[1];
		xl[i+3] = x[i+3] - 0.01;
		xl[i+6] = x[i+6]*(1 - RTParamArr[21+MAX_N_RINGS]/100);
		xl[i+9] = x[i+9]*(1 - RTParamArr[22+MAX_N_RINGS]/100);
		xu[i] = x[i] + RTParamArr[1];
		xu[i+3] = x[i+3] + 0.01;
		xu[i+6] = x[i+6]*(1 + RTParamArr[21+MAX_N_RINGS]/100);
		xu[i+9] = x[i+9]*(1 + RTParamArr[22+MAX_N_RINGS]/100);
	}
	for (i=0;i<n;i++){
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_pos_ini f_data;
	f_data.HKLInts = HKLints;
	f_data.IntParamArr = IntParamArr;
	f_data.OmeBoxArr = OmeBoxArr;
	f_data.RTParamArr = RTParamArr;
	f_data.hkls = hklsIn;
	f_data.nMatched = nMatched;
	f_data.n_arr = n_arr;
	f_data.spotsYZO = spotsYZO;
	f_data.TheorSpots = TheorSpots;
	f_data.hklspace = hklspace;
	struct func_data_pos_ini *f_datat;
	f_datat = &f_data;
	void *trp = (struct func_data_pos_ini *) f_datat;
	RealType minf;
	RealType reqmin = 1e-8;
	// NOTE(review): konvge/kcount/icount/numres/ifault are nelmin-style
	// controls; only `stop` is consumed by nldrmd_minimize below.
	int konvge = 10;
	int kcount = MAX_N_EVALS;
	int icount, numres, ifault;
	nlopt_stopping stop;
	stop.n = n;
	stop.maxeval = MAX_N_EVALS;
	stop.ftol_rel = reqmin;
	stop.xtol_rel = reqmin;
	stop.minf_max = reqmin;
	nlopt_func f = &pf_posIni;
	nlopt_result res = NLOPT_SUCCESS;
	if (spotNr == 0) printf("%lf\n",pf_posIni(n,x,trp));
	res = nldrmd_minimize(n,f,trp,xl,xu,x,&minf,xstep,&stop,scratch);
	if (spotNr == 0) printf("%lf\n",pf_posIni(n,x,trp));
	for (i=0;i<n;i++) xout[i] = x[i];
	if (res !=1) printf("Not optimized completely. %d, %lf\n",res,minf);
	RealType Pos[3] = {xout[0],xout[1],xout[2]};
	// Apply the fitted position to the observed spots and compute their
	// g-vectors; results go into spotsCorrected (6 values per spot).
	// NOTE(review): g[3] below is declared but never used — candidate for removal.
	RealType DisplY, DisplZ, Y, Z, Ome, g[3], Theta, lenK;
	for (int nrSp=0;nrSp<nMatched;nrSp++){
		DisplacementInTheSpot(xout[0],xout[1],xout[2],RTParamArr[0],spotsYZO[nrSp*9+5],
			spotsYZO[nrSp*9+6],spotsYZO[nrSp*9+4],RTParamArr[20+MAX_N_RINGS],
			0,&DisplY,&DisplZ);
		if (fabs(RTParamArr[20+MAX_N_RINGS]) > 0.02){
			// Large wedge angle: full omega correction.
			CorrectForOme(spotsYZO[nrSp*9+5]-DisplY,
				spotsYZO[nrSp*9+6]-DisplZ,RTParamArr[0],
				spotsYZO[nrSp*9+4],RTParamArr[19+MAX_N_RINGS],
				RTParamArr[20+MAX_N_RINGS],&Y, &Z, &Ome);
		}else{
			Y = spotsYZO[nrSp*9+5]-DisplY;
			Z = spotsYZO[nrSp*9+6]-DisplZ;
			Ome = spotsYZO[nrSp*9+4];
		}
		Theta = atand(CalcNorm2(Y,Z)/RTParamArr[0])/2;
		lenK = CalcNorm3(RTParamArr[0],Y,Z);
		SpotToGv(RTParamArr[0]/lenK,Y/lenK,Z/lenK,Ome,Theta,&spotsCorrected[nrSp*6+2],
			&spotsCorrected[nrSp*6+3],&spotsCorrected[nrSp*6+4]);
		spotsCorrected[nrSp*6+0] = Y;
		spotsCorrected[nrSp*6+1] = Z;
		spotsCorrected[nrSp*6+5] = spotsYZO[nrSp*9+8];
	}
	// --- Stage 2: orientation fit (Euler angles + lattice, 9 params) ---
	n = 9;
	for (i=0;i<9;i++){
		x[i] = FitParams[i+3];
	}
	for (i=0;i<3;i++){
		xl[i] = x[i] - 2;
		xl[i+3] = x[i+3]*(1 - RTParamArr[21+MAX_N_RINGS]/100);
		xl[i+6] = x[i+6]*(1 - RTParamArr[22+MAX_N_RINGS]/100);
		xu[i] = x[i] + 2;
		xu[i+3] = x[i+3]*(1 + RTParamArr[21+MAX_N_RINGS]/100);
		xu[i+6] = x[i+6]*(1 + RTParamArr[22+MAX_N_RINGS]/100);
	}
	for (i=0;i<n;i++){
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_orient f_data2;
	f_data2.HKLInts = HKLints;
	f_data2.IntParamArr = IntParamArr;
	f_data2.OmeBoxArr = OmeBoxArr;
	f_data2.RTParamArr = RTParamArr;
	f_data2.hkls = hklsIn;
	f_data2.nMatched = nMatched;
	f_data2.n_arr = n_arr;
	f_data2.spotsCorrected = spotsCorrected;
	f_data2.TheorSpots = TheorSpots;
	f_data2.hklspace = hklspace;
	struct func_data_orient *f_datat2;
	f_datat2 = &f_data2;
	void *trp2 = (struct func_data_orient *) f_datat2;
	stop.n = n;
	f = &pf_orient;
	res = nldrmd_minimize(n,f,trp2,xl,xu,x,&minf,xstep,&stop,scratch);
	for (i=0;i<n;i++) xout[i] = x[i];
	if (res !=1) printf("Not optimized completely. %d, %lf\n",res,minf);
	RealType Euler[3] = {xout[0],xout[1],xout[2]};
	// --- Stage 3: lattice-parameter (strain) fit, 6 params ---
	n = 6;
	for (i=0;i<n;i++){
		x[i] = FitParams[i+6];
	}
	for (i=0;i<3;i++){
		xl[i] = x[i]*(1 - RTParamArr[21+MAX_N_RINGS]/100);
		xl[i+3] = x[i+3]*(1 - RTParamArr[22+MAX_N_RINGS]/100);
		xu[i] = x[i]*(1 + RTParamArr[21+MAX_N_RINGS]/100);
		xu[i+3] = x[i+3]*(1 + RTParamArr[22+MAX_N_RINGS]/100);
	}
	for (i=0;i<n;i++){
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_strains f_data3;
	f_data3.Euler = Euler;
	f_data3.HKLInts = HKLints;
	f_data3.IntParamArr = IntParamArr;
	f_data3.OmeBoxArr = OmeBoxArr;
	f_data3.RTParamArr = RTParamArr;
	f_data3.hkls = hklsIn;
	f_data3.nMatched = nMatched;
	f_data3.n_arr = n_arr;
	f_data3.spotsCorrected = spotsCorrected;
	f_data3.TheorSpots = TheorSpots;
	f_data3.hklspace = hklspace;
	struct func_data_strains *f_datat3;
	f_datat3 = &f_data3;
	void *trp3 = (struct func_data_strains *) f_datat3;
	stop.n = n;
	f = &pf_strains;
	res = nldrmd_minimize(n,f,trp3,xl,xu,x,&minf,xstep,&stop,scratch);
	for (i=0;i<n;i++) xout[i] = x[i];
	if (res !=1) printf("Not optimized completely. %d, %lf\n",res,minf);
	RealType LatCFit[6] = {xout[0],xout[1],xout[2],xout[3],xout[4],xout[5]};
	// --- Stage 4: final position fit against simulated spots ---
	// NOTE(review): unlike FitGrain, this variant uses CalcDiffrSpots (not
	// CalcDiffrSpotsStrained) here — confirm which is intended.
	n = 3;
	RealType OM[3][3];
	Euler2OrientMat(Euler,OM);
	CorrectHKLsLatCInd(LatCFit,hklsIn,n_arr,RTParamArr,hklspace,HKLints);
	int nTspots = CalcDiffrSpots(OM,RTParamArr+5,OmeBoxArr,IntParamArr[1],
		RTParamArr[5+MAX_N_RINGS+6],TheorSpotsCorrected,hklspace,n_arr);
	for (int i=0;i<3;i++){
		x[i] = Pos[i];
		xl[i] = x[i] - RTParamArr[1];
		xu[i] = x[i] + RTParamArr[1];
		xstep[i] = fabs(xu[i]-xl[i])*0.25;
	}
	struct func_data_pos_sec f_data4;
	f_data4.RTParamArr = RTParamArr;
	f_data4.nMatched = nMatched;
	// NOTE(review): nTspots was counted into TheorSpotsCorrected just above,
	// but the objective is given TheorSpots — confirm whether
	// f_data4.TheorSpots should be TheorSpotsCorrected instead.
	f_data4.TheorSpots = TheorSpots;
	f_data4.nTspots = nTspots;
	f_data4.spotsYZO = spotsYZO;
	struct func_data_pos_sec *f_datat4;
	f_datat4 = &f_data4;
	void *trp4 = (struct func_data_pos_sec *) f_datat4;
	stop.n = n;
	f = &pf_posSec;
	res = nldrmd_minimize(n,f,trp4,xl,xu,x,&minf,xstep,&stop,scratch);
	for (i=0;i<n;i++) xout[i] = x[i];
	if (res !=1) printf("Not optimized completely. %d, %lf\n",res,minf);
	RealType Pos2[3] = {xout[0],xout[1],xout[2]};
	// Pack the final result: position, Euler angles, 6 lattice parameters.
	for (i=0;i<3;i++){
		Result[i] = Pos2[i];
		Result[i+3] = Euler[i];
		Result[i+6] = LatCFit[i];
		Result[i+9] = LatCFit[i+3];
	}
}
// One thread per fitted grain: compare observed spots (corrected for the
// fitted position) against spots simulated from the fitted orientation and
// lattice parameters. Writes per-grain mean errors into Error_d
// (Error[0]=angle deg, Error[1]=Y/Z distance, Error[2]=omega difference) and
// a 22-column per-spot comparison record into SpotsComp_d.
__global__ void CalcAngleErrors(RealType *RTParamArr, int *IntParamArr,
	int *n_arr, RealType *OmeBoxArr, RealType *hkls_c, int *nMatchedArr,
	RealType *spotsYZO_d, RealType *x_d, RealType *TheorSpots_d,
	RealType *SpotsComp_d, RealType *Error_d, RealType *hklsIn, int *HKLints)
{
	int spotNr = blockIdx.x * blockDim.x + threadIdx.x;
	if (spotNr >= n_arr[2]){
		// n_arr[2] = number of grains; excess threads exit.
		return;
	}
	RealType *hkls, *spotsYZO, *x, *TheorSpots;
	RealType *SpotsComp, *Error;
	// NOTE(review): removed an unused local (nspots) that was declared but
	// never referenced in the original.
	int nMatched, nMatchedTillNowRowNr;
	hkls = hkls_c + spotNr*n_arr[1]*7;
	// x_d holds 12 fitted values per grain: position(3), Euler(3), lattice(6).
	RealType *LatC_d;
	LatC_d = x_d + spotNr*12 + 6;
	CorrectHKLsLatCInd(LatC_d, hklsIn, n_arr, RTParamArr, hkls, HKLints);
	nMatched = nMatchedArr[spotNr*3+0];
	nMatchedTillNowRowNr = nMatchedArr[spotNr*3+2];
	spotsYZO = spotsYZO_d + nMatchedTillNowRowNr*9;
	x = x_d + spotNr*12;
	SpotsComp = SpotsComp_d + nMatchedTillNowRowNr*22;
	Error = Error_d + spotNr*3;
	Error[0] = 0; Error[1] = 0; Error[2] = 0;
	TheorSpots = TheorSpots_d + n_arr[1]*2*spotNr*8;
	RealType OrientationMatrix[3][3];
	Euler2OrientMat(x+3,OrientationMatrix);
	int nTspots = CalcDiffrSpotsStrained(OrientationMatrix,OmeBoxArr,IntParamArr[1],
		RTParamArr[5+MAX_N_RINGS+6],TheorSpots,hkls,n_arr);
	RealType DisplY, DisplZ, Y, Z, Ome, Theta, lenK, go[3], *gth, angle, distt, omediff, tmpL;
	int spnr;
	for (int nrSp=0;nrSp<nMatched;nrSp++){
		// Shift the observed spot by the displacement due to the fitted
		// grain position, then (for large wedge) correct for omega.
		DisplacementInTheSpot(x[0],x[1],x[2],RTParamArr[0],spotsYZO[nrSp*9+5],
			spotsYZO[nrSp*9+6],spotsYZO[nrSp*9+4],RTParamArr[20+MAX_N_RINGS],
			0,&DisplY,&DisplZ);
		if (fabs(RTParamArr[20+MAX_N_RINGS]) > 0.02){
			CorrectForOme(spotsYZO[nrSp*9+5]-DisplY,
				spotsYZO[nrSp*9+6]-DisplZ,RTParamArr[0],
				spotsYZO[nrSp*9+4],RTParamArr[19+MAX_N_RINGS],
				RTParamArr[20+MAX_N_RINGS],&Y,
				&Z,&Ome);
		}else{
			Y = spotsYZO[nrSp*9+5]-DisplY;
			Z = spotsYZO[nrSp*9+6]-DisplZ;
			Ome = spotsYZO[nrSp*9+4];
		}
		Theta = 0.5*atand(CalcNorm2(Y,Z)/RTParamArr[0]);
		lenK = CalcNorm3(RTParamArr[0],Y,Z);
		SpotToGv(RTParamArr[0]/lenK,Y/lenK,Z/lenK,Ome,Theta,&go[0],&go[1],&go[2]);
		spnr = (int) spotsYZO[nrSp*9+8];
		for (int i=0;i<nTspots;i++){
			if ((int)TheorSpots[i*8+7] == spnr){
				gth = TheorSpots + i*8 + 3;
				// Angle between observed and theoretical g-vectors,
				// clamped to [-1,1] before acosd to guard rounding.
				tmpL = ((dot(go,gth))/(CalcNorm3(go[0],go[1],go[2])*CalcNorm3(gth[0],gth[1],gth[2])));
				if (tmpL > 1) tmpL = 1;
				if (tmpL < -1) tmpL = -1;
				angle = fabs(acosd(tmpL));
				distt = CalcNorm2(Y-TheorSpots[i*8+0],Z-TheorSpots[i*8+1]);
				omediff = fabs(Ome - TheorSpots[i*8+2]);
				// Accumulate means directly (each term pre-divided).
				Error[0] += fabs(angle/nMatched);
				Error[1] += fabs(distt/nMatched);
				Error[2] += fabs(omediff/nMatched);
				SpotsComp[nrSp*22+0] = spotsYZO[nrSp*9+3];
				SpotsComp[nrSp*22+1] = Y;
				SpotsComp[nrSp*22+2] = Z;
				SpotsComp[nrSp*22+3] = Ome;
				SpotsComp[nrSp*22+4] = go[0];
				SpotsComp[nrSp*22+5] = go[1];
				SpotsComp[nrSp*22+6] = go[2];
				for (int j=0;j<6;j++){
					SpotsComp[nrSp*22+j+7] = TheorSpots[i*8+j];
				}
				SpotsComp[nrSp*22+13]=spotsYZO[nrSp*9+0];
				SpotsComp[nrSp*22+14]=spotsYZO[nrSp*9+1];
				SpotsComp[nrSp*22+15]=spotsYZO[nrSp*9+2];
				SpotsComp[nrSp*22+16]=spotsYZO[nrSp*9+4];
				SpotsComp[nrSp*22+17]=spotsYZO[nrSp*9+5];
				SpotsComp[nrSp*22+18]=spotsYZO[nrSp*9+6];
				SpotsComp[nrSp*22+19]=angle;
				SpotsComp[nrSp*22+20]=distt;
				SpotsComp[nrSp*22+21]=omediff;
				break;
			}
		}
	}
}
// One thread per (grid position, orientation) pair: displace the simulated
// spots of one candidate orientation to one trial grain position, match them
// against the binned observed spots, and record the match count, mean
// internal angle (IAs), matched spot info, and the orientation matrix.
__global__ void CompareDiffractionSpots(RealType *AllTheorSpots, RealType *RTParamArr,
	int maxPos, RealType *ResultArr, int PosResultArr, int *nTspotsArr,
	int *data, int *ndata, RealType *ObsSpots, RealType *etamargins, int *AllGrainSpots,
	RealType *IAs, int *n_arr, int *nMatchedArr, int n_min, int nOrients, RealType *GS,
	RealType *AllSpotsYZO, RealType *SpotsInfo_d, RealType *Orientations, RealType *OrientationsOut){
	int nPos, orientPos, overallPos; // Position Calculate!!
	overallPos = blockIdx.x * blockDim.x + threadIdx.x;
	if (overallPos >= maxPos){
		return;
	}
	// Decompose the flat thread index into (position step, orientation).
	nPos = overallPos / nOrients;
	orientPos = overallPos % nOrients;
	nMatchedArr[overallPos] = 0;
	int n = n_min + nPos;
	RealType *TheorSpots;
	TheorSpots = AllTheorSpots + n_arr[1]*2*N_COL_THEORSPOTS*orientPos;
	for (int i=0;i<9;i++){
		OrientationsOut[10*overallPos + i] = Orientations[9*orientPos + i];
	}
	OrientationsOut[10*overallPos + 9] = (RealType) nTspotsArr[orientPos];
	int *GrainSpots;
	GrainSpots = AllGrainSpots + overallPos * n_arr[1] * 2;
	RealType *SpotsInfo;
	SpotsInfo = SpotsInfo_d + overallPos * n_arr[1] * 2 * 9;
	// Friedel-pair result row: seed spot geometry for this candidate.
	RealType y0, z0, xi, yi, zi, ys, zs,omega,RefRad;
	y0 = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 7];
	z0 = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 8];
	xi = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 9];
	yi = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 10];
	zi = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 11];
	ys = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 12];
	zs = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 13];
	omega = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 14];
	RefRad = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 15];
	RealType Displ_y, Displ_z;
	int nTspots, nMatched, MatchFound;
	RealType diffOmeBest, diffOme;
	long long unsigned Pos, Pos1, Pos2, Pos3;
	int nspots, DataPos;
	long long unsigned spotRow,spotRowBest;
	// NOTE(review): removed unused locals go[3] and gth[3] from the original
	// declaration (only gvo/gvth are used below).
	RealType omeo, ometh, gvo[3], gvth[3], lo, lth, tmp, gs[3];
	RealType n_eta_bins, n_ome_bins, t;
	n_eta_bins = ceil(360.0 / RTParamArr[5 + MAX_N_RINGS + 4]);
	n_ome_bins = ceil(360.0 / RTParamArr[5 + MAX_N_RINGS + 5]);
	// Trial grain position in the rotated (omega) frame.
	gs[0] = ((RTParamArr[3])*(n/xi)*xi*cos(omega*deg2rad)) +
		((ys - y0 + (RTParamArr[3])*(n/xi)*yi)*sin(omega*deg2rad));
	gs[1] = ((ys - y0 + (RTParamArr[3])*(n/xi)*yi)*cos(
		omega*deg2rad)) - ((RTParamArr[3])*(n/xi)*xi*sin(omega*deg2rad));
	gs[2] = zs - z0 + (RTParamArr[3])*(n/xi)*zi;
	GS[overallPos*3 + 0] = gs[0];
	GS[overallPos*3 + 1] = gs[1];
	GS[overallPos*3 + 2] = gs[2];
	nMatched = 0;
	nTspots = nTspotsArr[orientPos];
	IAs[overallPos] = 0;
	// Reject positions outside the beam height.
	if (fabs(zs - z0 + (RTParamArr[3])*(n/xi)*zi) > RTParamArr[2] /2) {
		nMatchedArr[overallPos] = 0;
		return;
	}
	RealType theta, lenK, yobs, zobs, thy, thz, thEta, thrad;
	for (int sp = 0 ; sp < nTspots ; sp++) {
		// Displace the theoretical spot to the trial position.
		ometh = TheorSpots[sp*N_COL_THEORSPOTS+2];
		t = (gs[0]*cos(deg2rad * ometh) - gs[1]*sin(deg2rad * ometh))/xi;
		Displ_y = ((gs[0]*sin(deg2rad * ometh))+ (gs[1]*cos(deg2rad * ometh))) - t* yi;
		Displ_z = gs[2] - t*zi;
		thy = TheorSpots[sp*N_COL_THEORSPOTS+0] + Displ_y;
		thz = TheorSpots[sp*N_COL_THEORSPOTS+1] + Displ_z;
		thEta = CalcEtaAngle(thy,thz);
		thrad = CalcNorm2(thy,thz) - RTParamArr[5 + (int)TheorSpots[sp*N_COL_THEORSPOTS+3]];
		MatchFound = 0;
		diffOmeBest = 100000;
		// Look up the (ring, eta, omega) bin of candidate observed spots.
		Pos1 = (((int) TheorSpots[sp*N_COL_THEORSPOTS+3])-1)*n_eta_bins*n_ome_bins;
		Pos2 = ((int)(floor((180+thEta)/RTParamArr[5 + MAX_N_RINGS + 4])))*n_ome_bins;
		Pos3 = ((int)floor((180+TheorSpots[sp*N_COL_THEORSPOTS+2])/RTParamArr[5 + MAX_N_RINGS + 5]));
		Pos = Pos1 + Pos2 + Pos3;
		nspots = *(ndata+ Pos*2);
		if (nspots == 0){
			continue;
		}
		DataPos = *(ndata + Pos*2+1);
		for (int iSpot = 0 ; iSpot < nspots; iSpot++ ) {
			spotRow = *(data+DataPos + iSpot);
			// Match within radius, reference-radius and eta tolerances;
			// among the survivors keep the closest omega.
			if ( fabs(thrad - ObsSpots[spotRow*9+8]) < RTParamArr[5 + MAX_N_RINGS + 3] ) {
				if ( fabs(RefRad - ObsSpots[spotRow*9+3]) < RTParamArr[5 + MAX_N_RINGS + 2] ) {
					if ( fabs(thEta - ObsSpots[spotRow*9+6]) < etamargins[(int) TheorSpots[sp*N_COL_THEORSPOTS+3]] ) {
						diffOme = fabs(TheorSpots[sp*N_COL_THEORSPOTS+2] - ObsSpots[spotRow*9+2]);
						if ( diffOme < diffOmeBest ) {
							diffOmeBest = diffOme;
							spotRowBest = spotRow;
							MatchFound = 1;
						}
					}
				}
			}
		}
		if (MatchFound == 1) {
			// Sanity check: the two spot tables must agree on the spot id.
			if ((int)AllSpotsYZO[spotRowBest*8+3] != (int)ObsSpots[spotRowBest*9+4]) return;
			for (int i=0;i<8;i++){
				SpotsInfo[nMatched * 9 + i] = AllSpotsYZO[spotRowBest * 8 + i];
			}
			SpotsInfo[nMatched * 9 + 8] = TheorSpots[sp*N_COL_THEORSPOTS+4];
			GrainSpots[nMatched] = (int) ObsSpots[spotRowBest*9+4];
			omeo = ObsSpots[spotRowBest*9+2];
			ometh = TheorSpots[sp*N_COL_THEORSPOTS+2];
			// Internal angle between theoretical and (position-corrected)
			// observed g-vectors, accumulated into IAs.
			theta = atand(CalcNorm2(TheorSpots[sp*N_COL_THEORSPOTS+0],TheorSpots[sp*N_COL_THEORSPOTS+1])/RTParamArr[0])/2;
			lenK = CalcNorm3(RTParamArr[0],TheorSpots[sp*N_COL_THEORSPOTS+0],TheorSpots[sp*N_COL_THEORSPOTS+1]);
			SpotToGv(RTParamArr[0]/lenK,TheorSpots[sp*N_COL_THEORSPOTS+0]/lenK,TheorSpots[sp*N_COL_THEORSPOTS+1]/lenK,ometh,theta,&gvth[0],&gvth[1],&gvth[2]);
			t = (gs[0]*cos(deg2rad * omeo) - gs[1]*sin(deg2rad * omeo))/xi;
			Displ_y = ((gs[0]*sin(deg2rad * omeo))+ (gs[1]*cos(deg2rad * omeo))) - t* yi;
			Displ_z = gs[2] - t*zi;
			yobs = ObsSpots[spotRowBest*9+0]-Displ_y;
			zobs = ObsSpots[spotRowBest*9+1]-Displ_z;
			theta = atand(CalcNorm2(yobs,zobs)/RTParamArr[0])/2;
			lenK = CalcNorm3(RTParamArr[0],yobs,zobs);
			SpotToGv(RTParamArr[0]/lenK,yobs/lenK,zobs/lenK,omeo,theta,&gvo[0],&gvo[1],&gvo[2]);
			lo = CalcLength(gvo[0],gvo[1],gvo[2]);
			lth = CalcLength(gvth[0],gvth[1],gvth[2]);
			tmp = dot(gvo,gvth)/(lo*lth);
			if (tmp >1) tmp = 1;
			else if (tmp < -1) tmp = -1;
			IAs[overallPos] += rad2deg * acos(tmp);
			nMatched++;
		}
	}
	// BUG FIX: guard the mean against nMatched == 0 — the original divided
	// unconditionally, storing NaN (0/0) into IAs when nothing matched.
	if (nMatched > 0) IAs[overallPos] /= (RealType)nMatched;
	nMatchedArr[overallPos] = nMatched;
}
// One thread per candidate orientation: build the rotation mapping hkl onto
// hklnormal, spin it about hklnormal by orient*step (RTParamArr[4]),
// simulate the diffraction spots for the composed orientation, and store
// both the spot count (nSpotsArr) and the 3x3 matrix (Orientations).
__global__ void ReturnDiffractionSpots(RealType *RTParamArr, RealType *OmeBoxArr,
	int *IntParamArr, RealType *AllTheorSpots, RealType *hkls, int *n_arr, int PosResultArr,
	RealType *ResultArr, int norients, int *nSpotsArr, RealType *Orientations){
	int orient = blockIdx.x * blockDim.x + threadIdx.x;
	if (orient >= norients) return;
	RealType *TheorSpots = AllTheorSpots + n_arr[1]*2*N_COL_THEORSPOTS*orient;
	RealType hkl[3], hklnormal[3];
	hkl[0] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 0];
	hkl[1] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 1];
	hkl[2] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 2];
	hklnormal[0] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 3];
	hklnormal[1] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 4];
	hklnormal[2] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 5];
	RealType v[3];
	crossProduct(v, hkl, hklnormal);	// rotation axis: hkl x hklnormal
	RealType RotMat[3][3];
	RealType RotMat2[3][3];
	RealType RotMat3[3][3];
	RealType hkllen = sqrt(hkl[0]*hkl[0] + hkl[1]*hkl[1] + hkl[2]*hkl[2]);
	RealType hklnormallen = sqrt(hklnormal[0]*hklnormal[0] + hklnormal[1]*hklnormal[1] + hklnormal[2]*hklnormal[2]);
	RealType dotpr = dot(hkl, hklnormal);
	RealType angled = rad2deg * acos(dotpr/(hkllen*hklnormallen));
	// Use the precomputed angle: the original recomputed the entire
	// lengths/dot-product/acos expression inline here, duplicating the
	// four statements above.
	AxisAngle2RotMatrix(v, angled, RotMat);
	AxisAngle2RotMatrix(hklnormal, orient*RTParamArr[4], RotMat2);
	MatrixMultF33(RotMat2, RotMat, RotMat3);
	nSpotsArr[orient] = CalcDiffrSpots_Furnace(RotMat3,
		RTParamArr + 5, OmeBoxArr, IntParamArr[1],
		RTParamArr[5 + MAX_N_RINGS + 6], TheorSpots, hkls,n_arr);
	// Flatten the orientation matrix row-major into the output array.
	int PosUse = 9*orient;
	Orientations[PosUse + 0] = RotMat3[0][0];
	Orientations[PosUse + 1] = RotMat3[0][1];
	Orientations[PosUse + 2] = RotMat3[0][2];
	Orientations[PosUse + 3] = RotMat3[1][0];
	Orientations[PosUse + 4] = RotMat3[1][1];
	Orientations[PosUse + 5] = RotMat3[1][2];
	Orientations[PosUse + 6] = RotMat3[2][0];
	Orientations[PosUse + 7] = RotMat3[2][1];
	Orientations[PosUse + 8] = RotMat3[2][2];
}
// One thread per Friedel result row: compute how many candidate orientations
// and position steps to try for that row, writing the counts into ResultOut
// (N_COLS_ORIENTATION_NUMBERS columns: [0]=orientations, [1]=position steps,
// [2]=total candidates = (2*steps+1)*orientations).
__global__ void MakeOrientations(RealType *ResultArr, int *HKLints,
	int *IntParamArr, RealType *RTParamArr, int *ResultOut, int sumTotal){
	int ID = blockIdx.x * blockDim.x + threadIdx.x;
	if (ID >= sumTotal) return;
	RealType y0, xi, yi, ys;
	y0 = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 7];
	xi = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 9];
	yi = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 10];
	ys = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 12];
	RealType RotationAngles = CalcRotationAngle(((int) ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 6]), HKLints, IntParamArr, RTParamArr);
	// NOTE(review): the cast binds tighter than the division, so this is
	// ((int)RotationAngles) / RTParamArr[4], then truncated on assignment.
	// Confirm whether (int)(RotationAngles/RTParamArr[4]) was intended.
	ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 0] = (int) RotationAngles/RTParamArr[4];
	// Number of position steps along the beam from the quadratic solution of
	// the sample-radius constraint (RTParamArr[1] = Rsample, [3] = step size).
	ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 1] = (int)((((-(2*yi*(ys-y0))+sqrt((2*yi*(ys-y0))*(2*yi*(ys-y0))
		- 4*(xi*xi + yi*yi)*((ys-y0)*(ys-y0) - RTParamArr[1]*RTParamArr[1]
		)))/(2*(xi*xi + yi*yi)) + 20)*xi)/(RTParamArr[3]));
	ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 2] = (2*ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 1] + 1) * ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 0];
}
// TryFriedel: for an observed spot at (ys, zs) on ring `ringno`, recorded at
// rotation angle `omega` (degrees), search ObsSpotsLab for its Friedel pair
// (same ring, omega shifted by +/-180 deg). For every pair candidate that
// passes the omega, radius, and eta-window tolerances, compute the implied
// plane normal and append one 16-column row to ResultArray at the slab for
// this thread (rowID * MAX_N_FRIEDEL_PAIRS rows).
// Returns the number of Friedel-pair candidates stored.
// NOTE(review): ObsSpotsLab is indexed with stride 9; columns used here are
// 0=y, 1=z, 2=omega, 5=ring number — inferred from usage in this function
// and in FriedelFinding; confirm against the file that writes Spots.bin.
__device__ int TryFriedel(RealType ys, RealType zs,
RealType ttheta, RealType eta, RealType omega, int ringno,
RealType Ring_rad, RealType Rsample, RealType Hbeam, RealType OmeTol,
RealType RadiusTol, RealType *ObsSpotsLab, RealType *hkls, int *n_arr,
RealType *RTParamArr, RealType *ResultArray, int rowID, RealType RefRad){
int NrFriedel = 0;
// Friedel mate appears half a rotation away; keep OmeF in (-180, 180].
RealType OmeF;
if (omega < 0 ) OmeF = omega + 180;
else OmeF = omega - 180;
int quadr_coeff2 = 0, quadr_coeff, coeff_y0 = 0, coeff_z0 = 0;
RealType eta_Hbeam, y0_max_z0, y0_min_z0, y0_max = 0, y0_min = 0, z0_min = 0, z0_max = 0;
// Angle between the spot direction and the beam plane, used to widen the
// effective beam height for spots away from the equator.
if (eta > 90) eta_Hbeam = 180 - eta;
else if (eta < -90) eta_Hbeam = 180 - fabs(eta);
else eta_Hbeam = 90 - fabs(eta);
Hbeam = Hbeam + 2*(Rsample*tan(ttheta*deg2rad))*(sin(eta_Hbeam*deg2rad));
// Angular extent of the pole/equator exclusion zones on the ring.
RealType eta_pole = (1 + rad2deg*acos(1-(Hbeam/Ring_rad)));
RealType eta_equator = (1 + rad2deg*acos(1-(Rsample/Ring_rad)));
// Classify eta into a ring quadrant; the coeff_* signs select the branch of
// the sqrt when converting between y0 and z0 on the ring.
if ((eta >= eta_pole) && (eta <= (90-eta_equator)) ) { // % 1st quadrant
quadr_coeff = 1;
coeff_y0 = -1;
coeff_z0 = 1;
} else if ( (eta >=(90+eta_equator)) && (eta <= (180-eta_pole)) ) {//% 4th quadrant
quadr_coeff = 2;
coeff_y0 = -1;
coeff_z0 = -1;
} else if ( (eta >= (-90+eta_equator) ) && (eta <= -eta_pole) ) { // % 2nd quadrant
quadr_coeff = 2;
coeff_y0 = 1;
coeff_z0 = 1;
} else if ( (eta >= (-180+eta_pole) ) && (eta <= (-90-eta_equator)) ) { // % 3rd quadrant
quadr_coeff = 1;
coeff_y0 = 1;
coeff_z0 = -1;
} else quadr_coeff = 0;
// Window of ideal spot positions allowed by the sample radius and beam height.
RealType y0_max_Rsample = ys + Rsample;
RealType y0_min_Rsample = ys - Rsample;
RealType z0_max_Hbeam = zs + 0.5 * Hbeam;
RealType z0_min_Hbeam = zs - 0.5 * Hbeam;
if (quadr_coeff == 1) {
y0_max_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max_Hbeam * z0_max_Hbeam));
y0_min_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min_Hbeam * z0_min_Hbeam));
} else if (quadr_coeff == 2) {
y0_max_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min_Hbeam * z0_min_Hbeam));
y0_min_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max_Hbeam * z0_max_Hbeam));
}
if (quadr_coeff > 0) {
// In a proper quadrant: intersect the Rsample window with the Hbeam window.
y0_max = min(y0_max_Rsample, y0_max_z0);
y0_min = max(y0_min_Rsample, y0_min_z0);
} else {
// Near a pole or the equator: only one of the windows constrains.
if ((eta > -eta_pole) && (eta < eta_pole )) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = 1;
} else if (eta < (-180+eta_pole)) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = -1;
} else if (eta > (180-eta_pole)) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = -1;
} else if (( eta > (90-eta_equator)) && (eta < (90+eta_equator)) ) {
quadr_coeff2 = 1;
z0_max = z0_max_Hbeam;
z0_min = z0_min_Hbeam;
coeff_y0 = -1;
} else if ((eta > (-90-eta_equator)) && (eta < (-90+eta_equator)) ) {
quadr_coeff2 = 1;
z0_max = z0_max_Hbeam;
z0_min = z0_min_Hbeam;
coeff_y0 = 1;
}
}
// Complete the (y0, z0) window by projecting the free coordinate onto the ring.
if ( quadr_coeff2 == 0 ) {
z0_min = coeff_z0 * sqrt((Ring_rad * Ring_rad)-(y0_min * y0_min));
z0_max = coeff_z0 * sqrt((Ring_rad * Ring_rad)-(y0_max * y0_max));
} else {
y0_min = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min * z0_min));
y0_max = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max * z0_max));
}
// Mirror the window to where the Friedel mate should land (y preserved,
// z negated for the ideal pair), then shift by the observed offsets.
RealType dYMin = ys - y0_min;
RealType dYMax = ys - y0_max;
RealType dZMin = zs - z0_min;
RealType dZMax = zs - z0_max;
RealType YMinFrIdeal = y0_min;
RealType YMaxFrIdeal = y0_max;
RealType ZMinFrIdeal = -z0_min;
RealType ZMaxFrIdeal = -z0_max;
RealType YMinFr = YMinFrIdeal - dYMin;
RealType YMaxFr = YMaxFrIdeal - dYMax;
RealType ZMinFr = ZMinFrIdeal + dZMin;
RealType ZMaxFr = ZMaxFrIdeal + dZMax;
// Acceptance window in eta for the transformed pair position.
RealType Eta1, Eta2;
Eta1 = CalcEtaAngle((YMinFr + ys),(ZMinFr - zs));
Eta2 = CalcEtaAngle((YMaxFr + ys),(ZMaxFr - zs));
RealType EtaMinF = min(Eta1,Eta2);
RealType EtaMaxF = max(Eta1,Eta2);
RealType yf, zf, EtaTransf, radius, IdealY, IdealZ, xi,yi,zi, hklnormal[3], hkl[3];
// Scan every observed spot for a matching Friedel mate.
for (int r=0 ; r < n_arr[0] ; r++) {
if ( ((int)ObsSpotsLab[r*9+5]) != ringno ) continue; // Not a Friedel pair
if ( fabs(ObsSpotsLab[r*9+2] - OmeF) > OmeTol) continue; // Not a Friedel pair
yf = ObsSpotsLab[r*9+0];
zf = ObsSpotsLab[r*9+1];
EtaTransf = CalcEtaAngle(yf + ys, zf - zs);
// Spot-to-mirrored-spot distance must be close to the ring diameter.
radius = sqrt((yf + ys)*(yf + ys) + (zf - zs)*(zf - zs));
if ( fabs(radius - 2*Ring_rad) > RadiusTol) continue;
if (( EtaTransf < EtaMinF) || (EtaTransf > EtaMaxF) ) continue;
// Project the midpoint-corrected spot position back onto the ring to get
// the ideal (grain-at-origin) spot position.
IdealY = Ring_rad*(ys - ((-ObsSpotsLab[r*9+0] + ys)/2))/sqrt((
ys - ((-ObsSpotsLab[r*9+0] + ys)/2))*(ys - ((-ObsSpotsLab[r*9+0] +
ys)/2))+(zs - (( ObsSpotsLab[r*9+1] + zs)/2))*(zs - ((
ObsSpotsLab[r*9+1] + zs)/2)));
IdealZ = Ring_rad*(zs - (( ObsSpotsLab[r*9+1] + zs)/2))/sqrt((
ys - ((-ObsSpotsLab[r*9+0] + ys)/2))*(ys - ((-ObsSpotsLab[r*9+0] +
ys)/2))+(zs - (( ObsSpotsLab[r*9+1] + zs)/2))*(zs - ((
ObsSpotsLab[r*9+1] + zs)/2)));
// Unit vector along the scattered beam; RTParamArr[0] is the
// sample-to-detector distance along x.
xi = RTParamArr[0]/CalcLength(RTParamArr[0],IdealY,IdealZ);
yi = IdealY/CalcLength(RTParamArr[0],IdealY,IdealZ);
zi = IdealZ/CalcLength(RTParamArr[0],IdealY,IdealZ);
// Plane normal = (scattered - incident) direction, rotated back by omega.
hklnormal[0] = (-1 + xi) * cos(-omega*deg2rad) - yi * sin(-omega*deg2rad);
hklnormal[1] = (-1 + xi) * sin(-omega*deg2rad) + yi * cos(-omega*deg2rad);
hklnormal[2] = zi;
// First hkl entry belonging to this ring (hkls stride is 7, col 3 = ring).
for (int i=0;i<n_arr[1];i++){
if ((int) hkls[i*7+3] == ringno){
hkl[0] = hkls[i*7+0];
hkl[1] = hkls[i*7+1];
hkl[2] = hkls[i*7+2];
break;
}
}
// Emit one 16-column result row for this candidate.
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 0] = hkl[0];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 1] = hkl[1];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 2] = hkl[2];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 3] = hklnormal[0];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 4] = hklnormal[1];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 5] = hklnormal[2];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 6] = (RealType) ringno;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 7] = IdealY;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 8] = IdealZ;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 9] = xi;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 10] = yi;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 11] = zi;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 12] = ys;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 13] = zs;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 14] = omega;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 15] = RefRad;
NrFriedel++;
}
return NrFriedel;
}
// --- helpers for CalcAllPlanes -------------------------------------------
// Given an ideal spot position (y0, z0) on the detector ring and the frame
// rotation omega (degrees), compute the unit scattered-beam direction
// (xi, yi, zi) and the plane normal rotated back into the sample frame.
// xBeam is the sample-to-detector distance along x (RTParamArr[0]).
__device__ static inline void CAPNormalFromY0Z0(RealType xBeam, RealType y0,
RealType z0, RealType omega, RealType *xi, RealType *yi, RealType *zi,
RealType hklnormal[3])
{
RealType lenK = CalcNorm3(xBeam, y0, z0);
*xi = xBeam/lenK;
*yi = y0/lenK;
*zi = z0/lenK;
// Plane normal = (scattered - incident) direction, rotated by -omega.
hklnormal[0] = (-1 + (*xi)) * cos(-omega*deg2rad) - (*yi) * sin(-omega*deg2rad);
hklnormal[1] = (-1 + (*xi)) * sin(-omega*deg2rad) + (*yi) * cos(-omega*deg2rad);
hklnormal[2] = (*zi);
}
// Append one 16-column candidate row into ResultArray at the slab for this
// thread: row (rowID*MAX_N_FRIEDEL_PAIRS + planeNr) of width
// N_COLS_FRIEDEL_RESULTS.
__device__ static inline void CAPStoreCandidate(RealType *ResultArray, int rowID,
int planeNr, const RealType hkl[3], const RealType hklnormal[3], int ringno,
RealType y0_vector, RealType z0_vector, RealType xi, RealType yi, RealType zi,
RealType ys, RealType zs, RealType omega, RealType RefRad)
{
RealType *row = ResultArray + rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS
+ planeNr * N_COLS_FRIEDEL_RESULTS;
row[0] = hkl[0];
row[1] = hkl[1];
row[2] = hkl[2];
row[3] = hklnormal[0];
row[4] = hklnormal[1];
row[5] = hklnormal[2];
row[6] = (RealType) ringno;
row[7] = y0_vector;
row[8] = z0_vector;
row[9] = xi;
row[10] = yi;
row[11] = zi;
row[12] = ys;
row[13] = zs;
row[14] = omega;
row[15] = RefRad;
}
// CalcAllPlanes: fallback candidate generator when no Friedel pair is found.
// Samples the arc of ideal spot positions (y0, z0) on ring `ringno` that is
// compatible with the observed spot (ys, zs), the sample radius Rsample, and
// the beam height Hbeam, in steps of RTParamArr[3]; for each sample it stores
// a candidate plane normal (see CAPStoreCandidate) and returns the number of
// candidates written.
__device__ int CalcAllPlanes(RealType ys, RealType zs,
RealType ttheta, RealType eta, RealType omega, int ringno,
RealType Ring_rad, RealType Rsample, RealType Hbeam, RealType *hkls, int *n_arr,
RealType *RTParamArr, RealType *ResultArray, int rowID, RealType RefRad){
int nPlanes=0;
// First hkl entry belonging to this ring (hkls stride is 7, col 3 = ring);
// zero-initialized so a missing ring cannot leak uninitialized values.
RealType hkl[3] = {0, 0, 0};
for (int i=0;i<n_arr[1];i++){
if ((int) hkls[i*7+3] == ringno){
hkl[0] = hkls[i*7+0];
hkl[1] = hkls[i*7+1];
hkl[2] = hkls[i*7+2];
break;
}
}
int quadr_coeff2 = 0;
RealType eta_Hbeam, quadr_coeff, coeff_y0 = 0, coeff_z0 = 0, y0_max_z0, y0_min_z0, y0_max = 0, y0_min = 0, z0_min = 0, z0_max = 0;
RealType y01, z01, y02, z02, y_diff, z_diff, length;
int nsteps;
RealType step_size = RTParamArr[3];
// Widen the effective beam height for spots away from the equator.
if (eta > 90)
eta_Hbeam = 180 - eta;
else if (eta < -90)
eta_Hbeam = 180 - fabs(eta);
else
eta_Hbeam = 90 - fabs(eta);
Hbeam = Hbeam + 2*(Rsample*tan(ttheta*deg2rad))*(sin(eta_Hbeam*deg2rad));
// Angular extent of the pole/equator exclusion zones on the ring.
RealType eta_pole = 1 + rad2deg*acos(1-(Hbeam/Ring_rad));
RealType eta_equator = 1 + rad2deg*acos(1-(Rsample/Ring_rad));
// Classify eta into a ring quadrant; coeff_* select the sqrt branch when
// converting between y0 and z0 on the ring.
if ((eta >= eta_pole) && (eta <= (90-eta_equator)) ) { // % 1st quadrant
quadr_coeff = 1;
coeff_y0 = -1;
coeff_z0 = 1;
}else if ( (eta >=(90+eta_equator)) && (eta <= (180-eta_pole)) ) {//% 4th quadrant
quadr_coeff = 2;
coeff_y0 = -1;
coeff_z0 = -1;
}else if ( (eta >= (-90+eta_equator) ) && (eta <= -eta_pole) ) { // % 2nd quadrant
quadr_coeff = 2;
coeff_y0 = 1;
coeff_z0 = 1;
} else if ( (eta >= (-180+eta_pole) ) && (eta <= (-90-eta_equator)) ) { // % 3rd quadrant
quadr_coeff = 1;
coeff_y0 = 1;
coeff_z0 = -1;
}else
quadr_coeff = 0;
// Window of ideal spot positions allowed by sample radius and beam height.
RealType y0_max_Rsample = ys + Rsample;
RealType y0_min_Rsample = ys - Rsample;
RealType z0_max_Hbeam = zs + 0.5 * Hbeam;
RealType z0_min_Hbeam = zs - 0.5 * Hbeam;
if (quadr_coeff == 1) {
y0_max_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max_Hbeam * z0_max_Hbeam));
y0_min_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min_Hbeam * z0_min_Hbeam));
}else if (quadr_coeff == 2) {
y0_max_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min_Hbeam * z0_min_Hbeam));
y0_min_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max_Hbeam * z0_max_Hbeam));
}
if (quadr_coeff > 0) {
// Proper quadrant: intersect the Rsample window with the Hbeam window.
y0_max = min(y0_max_Rsample, y0_max_z0);
y0_min = max(y0_min_Rsample, y0_min_z0);
}else {
// Near a pole or the equator: only one window constrains; quadr_coeff2==1
// means the arc is parameterized by z0 instead of y0.
if ((eta > -eta_pole) && (eta < eta_pole )) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = 1;
}else if (eta < (-180+eta_pole)) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = -1;
}else if (eta > (180-eta_pole)) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = -1;
}else if (( eta > (90-eta_equator)) && (eta < (90+eta_equator)) ) {
quadr_coeff2 = 1;
z0_max = z0_max_Hbeam;
z0_min = z0_min_Hbeam;
coeff_y0 = -1;
}else if ((eta > (-90-eta_equator)) && (eta < (-90+eta_equator)) ) {
quadr_coeff2 = 1;
z0_max = z0_max_Hbeam;
z0_min = z0_min_Hbeam;
coeff_y0 = 1;
}
}
// Arc length between the two window endpoints determines the step count.
if (quadr_coeff2 == 0 ) {
y01 = y0_min;
z01 = coeff_z0 * sqrt((Ring_rad * Ring_rad )-(y01 * y01));
y02 = y0_max;
z02 = coeff_z0 * sqrt((Ring_rad * Ring_rad )-(y02 * y02));
}else {
z01 = z0_min;
y01 = coeff_y0 * sqrt((Ring_rad * Ring_rad )-((z01 * z01)));
z02 = z0_max;
y02 = coeff_y0 * sqrt((Ring_rad * Ring_rad )-((z02 * z02)));
}
y_diff = y01 - y02;
z_diff = z01 - z02;
length = sqrt(y_diff * y_diff + z_diff * z_diff);
nsteps = ceil(length/step_size);
// Force an odd step count so the window midpoint is always sampled.
if ((nsteps % 2) == 0 ) {
nsteps = nsteps +1;
}
RealType y0_vector, z0_vector, xi, yi, zi, hklnormal[3];
if ( nsteps == 1 ) {
// Single sample at the window midpoint.
if (quadr_coeff2 == 0) {
y0_vector = (y0_max+y0_min)/2;
z0_vector = coeff_z0 * sqrt((Ring_rad * Ring_rad)-(y0_vector * y0_vector));
}else {
z0_vector = (z0_max+z0_min)/2;
y0_vector = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_vector * z0_vector));
}
CAPNormalFromY0Z0(RTParamArr[0], y0_vector, z0_vector, omega, &xi, &yi, &zi, hklnormal);
CAPStoreCandidate(ResultArray, rowID, nPlanes, hkl, hklnormal, ringno,
y0_vector, z0_vector, xi, yi, zi, ys, zs, omega, RefRad);
nPlanes++;
}else {
// Uniform sampling of the window; the free coordinate is projected
// back onto the ring. quadr_coeff2 is loop-invariant.
RealType stepsizeY = (y0_max-y0_min)/(nsteps-1);
RealType stepsizeZ = (z0_max-z0_min)/(nsteps-1);
for (int i=0 ; i < nsteps ; i++) {
if (quadr_coeff2 == 0) {
y0_vector = y0_min + i*stepsizeY;
z0_vector = coeff_z0 * sqrt((Ring_rad * Ring_rad)-(y0_vector * y0_vector));
}else {
z0_vector = z0_min + i*stepsizeZ;
y0_vector = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_vector * z0_vector));
}
CAPNormalFromY0Z0(RTParamArr[0], y0_vector, z0_vector, omega, &xi, &yi, &zi, hklnormal);
CAPStoreCandidate(ResultArray, rowID, nPlanes, hkl, hklnormal, ringno,
y0_vector, z0_vector, xi, yi, zi, ys, zs, omega, RefRad);
nPlanes++;
}
}
return nPlanes;
}
// FriedelFinding: one thread per requested spot ID (n_arr[2] IDs total).
// For each spot, generate candidate plane normals either from its Friedel
// pair (when IntParamArr[2] == 1 and TryFriedel finds at least one pair) or
// by exhaustively sampling plane positions along the ring (CalcAllPlanes).
// Candidates are written into the rowID-th slab of ResultArray and their
// count into nNormals[rowID].
// Fix vs. previous version: removed the dead branch that re-tested
// nPlaneNormals != 0 immediately after establishing it was 0, and the
// duplicated CalcAllPlanes call; the fallback path is now a single call.
__global__ void FriedelFinding (int *SpotIDs, RealType *ObsSpotsLab,
RealType *hkls, int *n_arr, int *IntParamArr, RealType *RTParamArr, RealType *ResultArray, int *nNormals){
int rowID = blockIdx.x * blockDim.x + threadIdx.x;
if (rowID >= n_arr[2]) return;  // grid tail guard
int SpotID = SpotIDs[rowID];
// Column 4 of ObsSpotsLab (stride N_COL_OBSSPOTS) holds the spot ID.
int SpotRowNo = FindRowInMatrix(ObsSpotsLab, n_arr[0], N_COL_OBSSPOTS, 4, SpotID);
if (SpotRowNo == -1) {
printf("WARNING: SpotId %d not found in spots file! Ignoring this spotID. n_spots = %d\n", SpotID, n_arr[0]);
return;
}
RealType RefRad = ObsSpotsLab[SpotRowNo*9+3];
if (IntParamArr[2] == 1) { // Friedel-pair search enabled
int nPlaneNormals = TryFriedel(ObsSpotsLab[SpotRowNo*9+0], ObsSpotsLab[SpotRowNo*9+1],
ObsSpotsLab[SpotRowNo*9+7], ObsSpotsLab[SpotRowNo*9+6], ObsSpotsLab[SpotRowNo*9+2], (int) ObsSpotsLab[SpotRowNo*9+5],
RTParamArr[(int) ObsSpotsLab[SpotRowNo*9+5] + 5], RTParamArr[1], RTParamArr[2], RTParamArr[5 + MAX_N_RINGS + 0],
RTParamArr[5 + MAX_N_RINGS + 3], ObsSpotsLab, hkls, n_arr, RTParamArr, ResultArray, rowID, RefRad);
nNormals[rowID] = nPlaneNormals;
if (nPlaneNormals > 0) return; // pair(s) found — done
// No pair found: fall through to exhaustive sampling below.
}
nNormals[rowID] = CalcAllPlanes(ObsSpotsLab[SpotRowNo*9+0],
ObsSpotsLab[SpotRowNo*9+1], ObsSpotsLab[SpotRowNo*9+7],
ObsSpotsLab[SpotRowNo*9+6], ObsSpotsLab[SpotRowNo*9+2],
(int) ObsSpotsLab[SpotRowNo*9+5],
RTParamArr[(int) ObsSpotsLab[SpotRowNo*9+5] + 5],
RTParamArr[1], RTParamArr[2], hkls, n_arr, RTParamArr,
ResultArray, rowID, RefRad);
}
static inline RealType sin_cos_to_angle (RealType s, RealType c){return (s >= 0.0) ? acos(c) : 2.0 * M_PI - acos(c);}
// Decompose the 3x3 orientation matrix m into Euler angles (degrees),
// written to Euler as {psi, phi, theta}. When sin(phi) is numerically zero
// (phi near 0 or pi) the psi/theta split is degenerate, so psi is set to 0
// and theta absorbs the full in-plane rotation.
static inline void OrientMat2Euler(RealType m[3][3],RealType Euler[3])
{
	RealType angPhi;
	if (fabs(m[2][2] - 1.0) < EPS) {
		angPhi = 0;
	} else {
		angPhi = acos(m[2][2]);
	}
	RealType sinPhi = sin(angPhi);
	RealType angPsi, angTheta;
	if (fabs(sinPhi) < EPS) {
		// Degenerate case: rotation axis is (nearly) aligned with z.
		angPsi = 0.0;
		if (fabs(m[2][2] - 1.0) < EPS) {
			angTheta = sin_cos_to_angle(m[1][0], m[0][0]);
		} else {
			angTheta = sin_cos_to_angle(-m[1][0], m[0][0]);
		}
	} else {
		// Clamp the cosine argument to 1 if rounding pushed it out of range.
		if (fabs(-m[1][2] / sinPhi) <= 1.0) {
			angPsi = sin_cos_to_angle(m[0][2] / sinPhi, -m[1][2] / sinPhi);
		} else {
			angPsi = sin_cos_to_angle(m[0][2] / sinPhi, 1);
		}
		if (fabs(m[2][1] / sinPhi) <= 1.0) {
			angTheta = sin_cos_to_angle(m[2][0] / sinPhi, m[2][1] / sinPhi);
		} else {
			angTheta = sin_cos_to_angle(m[2][0] / sinPhi, 1);
		}
	}
	Euler[0] = rad2deg*angPsi;
	Euler[1] = rad2deg*angPhi;
	Euler[2] = rad2deg*angTheta;
}
// Returns the total number of FP32 CUDA cores on the device:
// multiprocessor count times the cores-per-SM for its compute capability.
// Table follows _ConvertSMVer2Cores in the CUDA samples' helper_cuda.h.
// Fix vs. previous version: architectures newer than Maxwell (Pascal, Volta,
// Turing, Ampere, Ada, Hopper) previously hit the default branch and
// returned 0 cores.
int getSPcores(cudaDeviceProp devProp)
{
	int cores = 0;
	int mp = devProp.multiProcessorCount;
	switch (devProp.major){
	case 2: // Fermi: SM 2.1 has 48 cores/SM, SM 2.0 has 32
		if (devProp.minor == 1) cores = mp * 48;
		else cores = mp * 32;
		break;
	case 3: // Kepler
		cores = mp * 192;
		break;
	case 5: // Maxwell
		cores = mp * 128;
		break;
	case 6: // Pascal: GP100 (6.0) has 64 cores/SM; GP10x (6.1/6.2) have 128
		if (devProp.minor == 0) cores = mp * 64;
		else cores = mp * 128;
		break;
	case 7: // Volta (7.0) and Turing (7.5): 64 FP32 cores/SM
		cores = mp * 64;
		break;
	case 8: // Ampere: GA100 (8.0) has 64 FP32 cores/SM; GA10x/Ada (8.6/8.7/8.9) have 128
		if (devProp.minor == 0) cores = mp * 64;
		else cores = mp * 128;
		break;
	case 9: // Hopper
		cores = mp * 128;
		break;
	default:
		printf("Unknown device type\n");
		break;
	}
	return cores;
}
int main(int argc, char *argv[]){
printf("\n\n\t\t\tGPU Indexer v1.0\nContact hsharma@anl.gov in case of questions about the MIDAS project.\n\n");
int cudaDeviceNum = atoi(argv[2]);
cudaSetDevice(cudaDeviceNum);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int nCores = getSPcores(deviceProp);
printf("Cuda Cores: %d\n",nCores);
RealType iStart = cpuSecond();
cudaGetDeviceProperties(&deviceProp,0);
size_t gpuGlobalMem = deviceProp.totalGlobalMem;
fprintf(stderr, "GPU global memory = %zu MBytes\n", gpuGlobalMem/(1024*1024));
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
char folder[MAX_LINE_LENGTH];
struct ParametersStruct Parameters;
char ParamFN[MAX_LINE_LENGTH];
getcwd(folder,sizeof(folder));
sprintf(ParamFN,"%s/%s",folder,argv[1]);
printf("Reading parameters from file: %s.\n", ParamFN);
int returncode = ReadParams(ParamFN, &Parameters);
int *SpotIDs_h;
SpotIDs_h = (int *) malloc(sizeof(*SpotIDs_h)* MAX_N_SPOTS);
char spotIDsfn[MAX_LINE_LENGTH];
sprintf(spotIDsfn,"%s/%s",folder,Parameters.IDsFileName);
fflush(stdout);
int nSpotIDs=0;
FILE *IDsFile = fopen(spotIDsfn,"r");
char line[MAX_LINE_LENGTH];
while (fgets(line,MAX_LINE_LENGTH,IDsFile)!=NULL){
SpotIDs_h[nSpotIDs] = atoi(line);
nSpotIDs++;
}
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Memcpy to spotIDs Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
RealType hkls[MAX_N_HKLS*7];
int HKLints[MAX_N_HKLS*4];
char *hklfn = "hkls.csv";
FILE *hklf = fopen(hklfn,"r");
char aline[MAX_LINE_LENGTH],dummy[MAX_LINE_LENGTH];
fgets(aline,MAX_LINE_LENGTH,hklf);
int Rnr,i;
int hi,ki,li;
RealType hc,kc,lc,RRd,Ds,tht;
int n_hkls_h = 0;
while (fgets(aline,MAX_LINE_LENGTH,hklf)!=NULL){
sscanf(aline, "%d %d %d %lf %d %lf %lf %lf %lf %s %lf",&hi,&ki,&li,&Ds,&Rnr,&hc,&kc,&lc,&tht,dummy,&RRd);
for (i=0;i<Parameters.NrOfRings;i++){
if (Rnr == Parameters.RingNumbers[i]){
HKLints[n_hkls_h*4+0] = hi;
HKLints[n_hkls_h*4+1] = ki;
HKLints[n_hkls_h*4+2] = li;
HKLints[n_hkls_h*4+3] = Rnr;
hkls[n_hkls_h*7+0] = hc;
hkls[n_hkls_h*7+1] = kc;
hkls[n_hkls_h*7+2] = lc;
hkls[n_hkls_h*7+3] = (RealType)Rnr;
hkls[n_hkls_h*7+4] = Ds;
hkls[n_hkls_h*7+5] = tht;
hkls[n_hkls_h*7+6] = RRd;
n_hkls_h++;
}
}
}
char datafn[MAX_LINE_LENGTH];
sprintf(datafn,"%s/%s",folder,"Data.bin");
char ndatafn[MAX_LINE_LENGTH];
sprintf(ndatafn,"%s/%s",folder,"nData.bin");
char spotsfn[MAX_LINE_LENGTH];
sprintf(spotsfn,"%s/%s",folder,"Spots.bin");
char extrafn[MAX_LINE_LENGTH];
sprintf(extrafn,"%s/%s",folder,"ExtraInfo.bin");
FILE *fData = fopen(datafn,"r");
FILE *fnData = fopen(ndatafn,"r");
FILE *fSpots = fopen(spotsfn,"r");
FILE *fExtraInfo = fopen(extrafn,"r");
RealType *hkls_d, *etamargins_d;
int *HKLints_d;
RealType etamargins[MAX_N_RINGS];
for ( i = 0 ; i < MAX_N_RINGS ; i++) {
if ( Parameters.RingRadii[i] == 0) {
etamargins[i] = 0;
}else {
etamargins[i] = rad2deg * atan(Parameters.MarginEta/Parameters.RingRadii[i]) + 0.5 * Parameters.StepsizeOrient;
}
}
cudaMalloc((RealType **)&hkls_d,n_hkls_h*7*sizeof(RealType));
cudaMalloc((int **)&HKLints_d,n_hkls_h*4*sizeof(int));
cudaMalloc((RealType **)&etamargins_d,MAX_N_RINGS*sizeof(RealType));
cudaMemcpy(hkls_d,hkls,n_hkls_h*7*sizeof(RealType),cudaMemcpyHostToDevice);
cudaMemcpy(HKLints_d,HKLints,n_hkls_h*4*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(etamargins_d,etamargins,MAX_N_RINGS*sizeof(RealType),cudaMemcpyHostToDevice);
int nspids = nSpotIDs, *sps;
cudaMalloc((int **)&sps,nspids*sizeof(int));
cudaMemcpy(sps,SpotIDs_h,nspids*sizeof(int),cudaMemcpyHostToDevice);
RealType *ObsSpotsLab, *spots_h;
fseek(fSpots,0L,SEEK_END);
long long sizeSpots = ftell(fSpots);
rewind(fSpots);
spots_h = (RealType *)malloc(sizeSpots);
fread(spots_h,sizeSpots,1,fSpots);
cudaMalloc((RealType **)&ObsSpotsLab,(size_t)sizeSpots);
cudaMemcpy(ObsSpotsLab,spots_h,sizeSpots,cudaMemcpyHostToDevice);
free(spots_h);
int n_spots_h = ((int)sizeSpots)/(9*sizeof(RealType));
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "End data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "FewSpotIDs Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
int *n_arr, n_arr_h[3];
cudaMalloc((int **)&n_arr,sizeof(int)*3);
n_arr_h[0] = n_spots_h;
n_arr_h[1] = n_hkls_h;
n_arr_h[2] = nspids;
cudaMemcpy(n_arr,n_arr_h,3*sizeof(int),cudaMemcpyHostToDevice);
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "SpotsInfo Theor and BestGrains Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
int *IntParamArr, IntParamArr_h[3];
IntParamArr_h[0] = Parameters.SpaceGroupNum;
IntParamArr_h[1] = Parameters.NoOfOmegaRanges;
IntParamArr_h[2] = Parameters.UseFriedelPairs;
cudaMalloc((int **)&IntParamArr, sizeof(int)*3);
cudaMemcpy(IntParamArr,IntParamArr_h,sizeof(int)*3,cudaMemcpyHostToDevice);
RealType *RTParamArr, RTParamArr_h[5 + MAX_N_RINGS + 8 + 10];
RTParamArr_h[0] = Parameters.Distance;
RTParamArr_h[1] = Parameters.Rsample;
RTParamArr_h[2] = Parameters.Hbeam;
RTParamArr_h[3] = Parameters.StepsizePos;
RTParamArr_h[4] = Parameters.StepsizeOrient;
for (int cntr=0;cntr<MAX_N_RINGS;cntr++) RTParamArr_h[5+cntr] = Parameters.RingRadii[cntr];
RTParamArr_h[5+MAX_N_RINGS+0] = Parameters.MarginOme;
RTParamArr_h[5+MAX_N_RINGS+1] = Parameters.MarginEta;
RTParamArr_h[5+MAX_N_RINGS+2] = Parameters.MarginRad;
RTParamArr_h[5+MAX_N_RINGS+3] = Parameters.MarginRadial;
RTParamArr_h[5+MAX_N_RINGS+4] = Parameters.EtaBinSize;
RTParamArr_h[5+MAX_N_RINGS+5] = Parameters.OmeBinSize;
RTParamArr_h[5+MAX_N_RINGS+6] = Parameters.ExcludePoleAngle;
RTParamArr_h[5+MAX_N_RINGS+7] = Parameters.MinMatchesToAcceptFrac;
for (int cntr=0;cntr<6;cntr++) RTParamArr_h[5+MAX_N_RINGS+8+cntr] = Parameters.ABCABG[cntr];
RTParamArr_h[5+MAX_N_RINGS+8+6] = Parameters.Wavelength;
RTParamArr_h[5+MAX_N_RINGS+8+7] = Parameters.wedge;
RTParamArr_h[5+MAX_N_RINGS+8+8] = Parameters.MargABC;
RTParamArr_h[5+MAX_N_RINGS+8+9] = Parameters.MargABG;
cudaMalloc((RealType **)&RTParamArr,(23+MAX_N_RINGS)*sizeof(RealType));
cudaMemcpy(RTParamArr,RTParamArr_h,(23+MAX_N_RINGS)*sizeof(RealType),cudaMemcpyHostToDevice);
RealType *OmeBoxArr, OmeBoxArr_h[Parameters.NoOfOmegaRanges * 6];
for (int cntr=0;cntr<Parameters.NoOfOmegaRanges;cntr++){
OmeBoxArr_h[cntr*6 + 0] = Parameters.BoxSizes[cntr][0];
OmeBoxArr_h[cntr*6 + 1] = Parameters.BoxSizes[cntr][1];
OmeBoxArr_h[cntr*6 + 2] = Parameters.BoxSizes[cntr][2];
OmeBoxArr_h[cntr*6 + 3] = Parameters.BoxSizes[cntr][3];
OmeBoxArr_h[cntr*6 + 4] = Parameters.OmegaRanges[cntr][0];
OmeBoxArr_h[cntr*6 + 5] = Parameters.OmegaRanges[cntr][1];
}
cudaMalloc((RealType **)&OmeBoxArr,Parameters.NoOfOmegaRanges * 6 * sizeof(RealType));
cudaMemcpy(OmeBoxArr,OmeBoxArr_h,Parameters.NoOfOmegaRanges * 6 * sizeof(RealType),cudaMemcpyHostToDevice);
int dim = nspids;
dim3 block (256);
dim3 grid ((dim/block.x)+1);
printf("Time elapsed before FriedelFinding: %fs\n",cpuSecond()-iStart);
RealType *ResultArray;
int *nNormals;
cudaMalloc((RealType **)&ResultArray,sizeof(RealType)*nspids*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS);
cudaMalloc((int **)&nNormals,sizeof(int)*nspids);
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Finding Friedel Pairs Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
FriedelFinding<<<grid,block>>>(sps, ObsSpotsLab, hkls_d,n_arr,IntParamArr,RTParamArr,ResultArray,nNormals);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
int *data, *nData, *data_h, *nData_h;
fseek(fData,0L,SEEK_END);
long long sizeData = ftell(fData);
rewind(fData);
data_h = (int *)malloc(sizeData);
fread(data_h,sizeData,1,fData);
cudaMalloc((int **)&data,(size_t)sizeData);
cudaMemcpy(data,data_h,sizeData,cudaMemcpyHostToDevice);
free(data_h);
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Memcpy data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
fseek(fnData,0L,SEEK_END);
long long sizenData = ftell(fnData);
rewind(fnData);
nData_h = (int *)malloc(sizenData);
fread(nData_h,sizenData,1,fnData);
cudaMalloc((int **)&nData,(size_t)sizenData);
cudaMemcpy(nData,nData_h,sizenData,cudaMemcpyHostToDevice);
free(nData_h);
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Memcpy ndata Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
int *nNormals_h;
nNormals_h = (int *) malloc(sizeof(int) * nspids);
cudaMemcpy(nNormals_h, nNormals, sizeof(int) * nspids, cudaMemcpyDeviceToHost);
RealType *ResultArray_h;
ResultArray_h = (RealType *) malloc(sizeof(RealType)*nspids*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS);
cudaMemcpy(ResultArray_h,ResultArray,sizeof(RealType)*nspids*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS,cudaMemcpyDeviceToHost);
cudaFree(ResultArray);
int sumTotal=0, *startingIDs;
startingIDs = (int *) malloc(sizeof(int) * nspids);
for (int i=0;i<nspids;i++){
startingIDs[i] = sumTotal;
sumTotal += nNormals_h[i];
}
RealType *ResultArr, *ResultArr_h;
int currentpos = 0, outerpos = 0, totalpos = 0;
ResultArr_h = (RealType *) malloc(sizeof(RealType)*N_COLS_FRIEDEL_RESULTS*sumTotal);
for (int i=0;i<nspids;i++){
currentpos = 0;
for (int j=0;j<nNormals_h[i];j++){
memcpy(ResultArr_h + (totalpos * N_COLS_FRIEDEL_RESULTS),
ResultArray_h + (outerpos*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS + currentpos *N_COLS_FRIEDEL_RESULTS),
sizeof(RealType)*N_COLS_FRIEDEL_RESULTS);
currentpos++;
totalpos++;
}
outerpos++;
}
if (totalpos != sumTotal){
printf("Something wrong.\n");
return 0;
}
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Memcpy data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
printf("Time elapsed before making orientations: %fs\n",cpuSecond()-iStart);
dim3 blocka (32);
dim3 grida ((sumTotal/blocka.x)+1);
cudaMalloc((RealType **)&ResultArr,sizeof(RealType)*N_COLS_FRIEDEL_RESULTS*sumTotal);
CHECK(cudaMemcpy(ResultArr, ResultArr_h,sizeof(RealType)*N_COLS_FRIEDEL_RESULTS*sumTotal,cudaMemcpyHostToDevice));
int *ResultMakeOrientations, *ResultMakeOrientations_h;
cudaMalloc((int **)&ResultMakeOrientations,N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int));
cudaMemset(ResultMakeOrientations,0,N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int));
//// Now generate candidates and match
MakeOrientations<<<grida,blocka>>>(ResultArr, HKLints_d, IntParamArr, RTParamArr, ResultMakeOrientations,sumTotal);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
ResultMakeOrientations_h = (int *) malloc(N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int));
cudaMemcpy(ResultMakeOrientations_h,ResultMakeOrientations,N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Memcpy before data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
long long int totalJobs = 0;
int maxJobs=0, maxJobsOrient=0;
for (int i=0;i<sumTotal;i++){
totalJobs += ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 2];
if (ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 2] > maxJobs)
maxJobs = ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 2];
if (ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 0] > maxJobsOrient)
maxJobsOrient = ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 0];
}
printf("Total Jobs: %lld, MaxJobs for one combination: %d\n",totalJobs,maxJobs);
fseek(fExtraInfo,0L,SEEK_END);
long long sizeExtra = ftell(fExtraInfo);
rewind(fExtraInfo);
RealType *ExtraInfo_h;
ExtraInfo_h = (RealType *)malloc(sizeExtra);
fread(ExtraInfo_h,sizeExtra,1,fExtraInfo);
int sizeAllSpots = (sizeExtra/14)*8;
int nExtraSpots = sizeAllSpots/(8*sizeof(RealType));
RealType *AllSpotsYZO_h;
AllSpotsYZO_h = (RealType *) malloc(sizeAllSpots);
for (int i=0;i<nExtraSpots;i++){
AllSpotsYZO_h[i*8+0] = ExtraInfo_h[i*14+0];
AllSpotsYZO_h[i*8+1] = ExtraInfo_h[i*14+1];
AllSpotsYZO_h[i*8+2] = ExtraInfo_h[i*14+2];
AllSpotsYZO_h[i*8+3] = ExtraInfo_h[i*14+4]; // ID
AllSpotsYZO_h[i*8+4] = ExtraInfo_h[i*14+8];
AllSpotsYZO_h[i*8+5] = ExtraInfo_h[i*14+9];
AllSpotsYZO_h[i*8+6] = ExtraInfo_h[i*14+10];
AllSpotsYZO_h[i*8+7] = ExtraInfo_h[i*14+5];
}
RealType *AllSpotsYZO_d;
cudaMalloc((RealType **)&AllSpotsYZO_d,sizeAllSpots);
cudaMemcpy(AllSpotsYZO_d,AllSpotsYZO_h,sizeAllSpots,cudaMemcpyHostToDevice);
RealType *AllTheorSpots, *IAs, *IAs_h, *GS, *Orientations, *GS_h, *Orientations_h, *AllInfo, *SpotsInfo_d,
*SpotsInfo, *OrientationsOut, *OrientationsOut_h;
int *AllGrainSpots,*nSpotsArr,*nMatchedArr,*nMatchedArr_h,*nSpotsArr_h, *SpotsInfoTotal;
cudaMalloc((RealType **)&AllTheorSpots,maxJobsOrient*n_hkls_h*N_COL_THEORSPOTS*2*sizeof(RealType));
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMalloc((int **)&AllGrainSpots,maxJobs*n_hkls_h*2*sizeof(int));
cudaMalloc((int **)&nSpotsArr,maxJobsOrient*sizeof(int));
cudaMalloc((RealType **)&IAs,maxJobs*sizeof(RealType));
cudaMalloc((int **)&nMatchedArr,maxJobs*sizeof(int));
cudaMemset(nMatchedArr,0,maxJobs*sizeof(int));
nMatchedArr_h = (int *) malloc(maxJobs*sizeof(int));
nSpotsArr_h = (int *) malloc(maxJobsOrient*sizeof(int));
IAs_h = (RealType *) malloc(maxJobs*sizeof(RealType));
memset(nMatchedArr_h,0,maxJobs*sizeof(int));
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Memcpy ndata Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
RealType bestFraction, tempFraction;
int nJobsOrient, posResultArr, nJobsTotal, n_min, BestPosition;
RealType bestIA, tempIA;
cudaMalloc((RealType **)&GS,3*maxJobs*sizeof(RealType));
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMalloc((RealType **)&Orientations,9*maxJobsOrient*sizeof(RealType));
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMalloc((RealType **)&SpotsInfo_d,n_hkls_h*2*9*maxJobs*sizeof(RealType));
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMalloc((RealType **)&OrientationsOut,10*maxJobs*sizeof(RealType));
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
SpotsInfo = (RealType *)malloc(n_hkls_h*2*9*sumTotal*sizeof(RealType));
OrientationsOut_h = (RealType *) malloc(10*maxJobs*sizeof(RealType));
GS_h = (RealType *) malloc(3*maxJobs*sizeof(RealType));
Orientations_h = (RealType *) malloc(9*maxJobsOrient*sizeof(RealType));
AllInfo = (RealType *) malloc(N_COL_GRAINMATCHES*sumTotal*sizeof(RealType));
memset(AllInfo,0,N_COL_GRAINMATCHES*sumTotal*sizeof(RealType));
SpotsInfoTotal = (int *) malloc(sumTotal*n_hkls_h*2*sizeof(int));
memset(SpotsInfoTotal,0,sumTotal*n_hkls_h*2*sizeof(int));
printf("Time elapsed before calculation of matches: %fs\n",cpuSecond()-iStart);
int PosOM;
for (int jobNr=0;jobNr<sumTotal;jobNr++){
posResultArr = jobNr;
nJobsOrient = ResultMakeOrientations_h[jobNr*N_COLS_ORIENTATION_NUMBERS + 0];
dim3 blockb (32);
dim3 gridb ((nJobsOrient/blockb.x)+1);
ReturnDiffractionSpots<<<gridb,blockb>>>(RTParamArr,OmeBoxArr,IntParamArr,
AllTheorSpots,hkls_d,n_arr,posResultArr,ResultArr,nJobsOrient,nSpotsArr,
Orientations);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMemcpy(nSpotsArr_h,nSpotsArr,nJobsOrient*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(Orientations_h,Orientations,nJobsOrient*9*sizeof(RealType),cudaMemcpyDeviceToHost);
nJobsTotal = ResultMakeOrientations_h[jobNr*N_COLS_ORIENTATION_NUMBERS + 2];
dim3 blockc (32);
dim3 gridc ((nJobsTotal/blockc.x)+1);
n_min = -ResultMakeOrientations_h[jobNr*N_COLS_ORIENTATION_NUMBERS + 1];
memset(OrientationsOut_h,0,10*nJobsTotal*sizeof(RealType));
CompareDiffractionSpots<<<gridc,blockc>>>(AllTheorSpots,RTParamArr,
nJobsTotal, ResultArr, posResultArr, nSpotsArr, data, nData, ObsSpotsLab,
etamargins_d, AllGrainSpots, IAs, n_arr, nMatchedArr, n_min, nJobsOrient,GS,
AllSpotsYZO_d,SpotsInfo_d,Orientations,OrientationsOut);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMemcpy(nMatchedArr_h,nMatchedArr,nJobsTotal*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(GS_h,GS,nJobsTotal*3*sizeof(RealType),cudaMemcpyDeviceToHost);
cudaMemcpy(IAs_h,IAs,nJobsTotal*sizeof(RealType),cudaMemcpyDeviceToHost);
cudaMemcpy(OrientationsOut_h,OrientationsOut,10*nJobsTotal*sizeof(RealType),cudaMemcpyDeviceToHost);
bestFraction = 0.0;
bestIA = 1000.0;
for (int idx=0;idx<nJobsTotal;idx++){
tempFraction = ((RealType)nMatchedArr_h[idx])/(OrientationsOut_h[idx*10+9]);
tempIA = IAs_h[idx];
if (tempFraction > bestFraction && tempFraction <= 1 && tempFraction >= 0){
bestIA = tempIA;
bestFraction = tempFraction;
BestPosition = idx;
}else if(tempFraction == bestFraction && tempIA < bestIA){
bestIA = tempIA;
BestPosition = idx;
}
}
if (bestFraction >= Parameters.MinMatchesToAcceptFrac){
cudaMemcpy(SpotsInfo+jobNr*n_hkls_h*2*9, SpotsInfo_d+BestPosition*n_hkls_h*2*9,nMatchedArr_h[BestPosition]*9*sizeof(RealType),cudaMemcpyDeviceToHost);
AllInfo[jobNr*N_COL_GRAINMATCHES + 0] = bestIA;
AllInfo[jobNr*N_COL_GRAINMATCHES + 1] = OrientationsOut_h[BestPosition*10+0];
AllInfo[jobNr*N_COL_GRAINMATCHES + 2] = OrientationsOut_h[BestPosition*10+1];
AllInfo[jobNr*N_COL_GRAINMATCHES + 3] = OrientationsOut_h[BestPosition*10+2];
AllInfo[jobNr*N_COL_GRAINMATCHES + 4] = OrientationsOut_h[BestPosition*10+3];
AllInfo[jobNr*N_COL_GRAINMATCHES + 5] = OrientationsOut_h[BestPosition*10+4];
AllInfo[jobNr*N_COL_GRAINMATCHES + 6] = OrientationsOut_h[BestPosition*10+5];
AllInfo[jobNr*N_COL_GRAINMATCHES + 7] = OrientationsOut_h[BestPosition*10+6];
AllInfo[jobNr*N_COL_GRAINMATCHES + 8] = OrientationsOut_h[BestPosition*10+7];
AllInfo[jobNr*N_COL_GRAINMATCHES + 9] = OrientationsOut_h[BestPosition*10+8];
AllInfo[jobNr*N_COL_GRAINMATCHES + 10] = GS_h[BestPosition*3 + 0];
AllInfo[jobNr*N_COL_GRAINMATCHES + 11] = GS_h[BestPosition*3 + 1];
AllInfo[jobNr*N_COL_GRAINMATCHES + 12] = GS_h[BestPosition*3 + 2];
AllInfo[jobNr*N_COL_GRAINMATCHES + 13] = OrientationsOut_h[BestPosition*10+9];
AllInfo[jobNr*N_COL_GRAINMATCHES + 14] = (RealType)nMatchedArr_h[BestPosition];
AllInfo[jobNr*N_COL_GRAINMATCHES + 15] = bestFraction;
}
}
printf("Time elapsed after calculation of matches: %fs\n",cpuSecond()-iStart);
// Now sort all the results.
RealType *SaveAllInfo, *spotsYZO, *LatCIn_h, *FitParams_h;
int *nMatchedArrIndexing;
spotsYZO = (RealType *) malloc(nspids*n_hkls_h*2*8*sizeof(RealType));
LatCIn_h = (RealType *) malloc(nspids*6*sizeof(RealType));
FitParams_h = (RealType *) malloc(nspids*12*sizeof(RealType));
nMatchedArrIndexing = (int *) malloc(nspids*3*sizeof(int));
memset(spotsYZO,0,nspids*n_hkls_h*2*9*sizeof(RealType));
memset(LatCIn_h,0,nspids*6*sizeof(RealType));
memset(FitParams_h,0,nspids*12*sizeof(RealType));
memset(nMatchedArrIndexing,0,nspids*3*sizeof(int));
int StartingPosition, EndPosition, bestPos;
RealType OrientTr[3][3], EulerTr[3];
int nSpotsIndexed = 0, nMatchedTillNow = 0, nSpotsMatched, nSpotsSim, *idsIndexed;
idsIndexed = (int *) malloc(nspids*sizeof(int));
memset(idsIndexed,0,nspids*sizeof(int));
for (int i=0;i<nspids;i++){
StartingPosition = startingIDs[i];
EndPosition = StartingPosition + nNormals_h[i];
bestFraction = Parameters.MinMatchesToAcceptFrac;
bestIA = 1000.0;
bestPos = -1;
for (int PlanePos=StartingPosition; PlanePos<EndPosition; PlanePos++){
tempIA = AllInfo[PlanePos*N_COL_GRAINMATCHES + 0];
tempFraction = AllInfo[PlanePos*N_COL_GRAINMATCHES + 15];
if (tempFraction > bestFraction){
bestFraction = tempFraction;
bestPos = PlanePos;
bestIA = tempIA;
} else if (tempFraction == bestFraction && tempIA < bestIA){
bestIA = tempIA;
bestPos = PlanePos;
}
}
if (bestPos >-1){
nSpotsMatched = (int)AllInfo[bestPos*N_COL_GRAINMATCHES+14];
nSpotsSim = (int)AllInfo[bestPos*N_COL_GRAINMATCHES+13];
nMatchedArrIndexing[nSpotsIndexed*3+0] = nSpotsMatched;
nMatchedArrIndexing[nSpotsIndexed*3+1] = nSpotsSim;
nMatchedArrIndexing[nSpotsIndexed*3+2] = nMatchedTillNow;
idsIndexed[nSpotsIndexed] = SpotIDs_h[i];
memcpy(spotsYZO+nMatchedTillNow*9, SpotsInfo + bestPos*n_hkls_h*2*9, nSpotsMatched*9*sizeof(RealType));
memcpy(LatCIn_h+nSpotsIndexed*6, RTParamArr_h+5+MAX_N_RINGS+8, 6*sizeof(RealType));
memcpy(FitParams_h+nSpotsIndexed*12, AllInfo + bestPos*N_COL_GRAINMATCHES + 10, 3*sizeof(RealType)); // Pos
for (int j=0;j<3;j++){
memcpy(&OrientTr[j][0],AllInfo+bestPos*N_COL_GRAINMATCHES+1+3*j,3*sizeof(RealType));
}
OrientMat2Euler(OrientTr,EulerTr);
memcpy(FitParams_h+nSpotsIndexed*12+3,EulerTr,3*sizeof(RealType)); // Orientation
memcpy(FitParams_h+nSpotsIndexed*12+6,LatCIn_h+nSpotsIndexed*6,6*sizeof(RealType)); // LatticeParameter
nSpotsIndexed++;
nMatchedTillNow += nSpotsMatched;
}
}
printf("Out of %d IDs, %d IDs were indexed.\n",nspids,nSpotsIndexed);
cudaFree(GS);
cudaFree(Orientations);
cudaFree(AllTheorSpots);
cudaFree(AllGrainSpots);
cudaFree(nSpotsArr);
cudaFree(IAs);
cudaFree(nMatchedArr);
cudaFree(data);
cudaFree(nData);
cudaFree(sps);
cudaFree(ObsSpotsLab);
cudaFree(ResultArr);
cudaFree(etamargins_d);
cudaFree(nNormals);
cudaFree(ResultMakeOrientations);
printf("Time elapsed after sorting the results: %lfs\nNow refining results.\n",cpuSecond()-iStart);
// We have spotsYZO, FitParams_h, we just call the function to run things.
int startRow, endRow, startRowNMatched, endRowNMatched, nrows, nrowsNMatched;
RealType *SpotsCompReturnArr, *SpListArr, *ErrorArr;
SpotsCompReturnArr = (RealType *)malloc(nMatchedTillNow*22*sizeof(RealType));
SpListArr = (RealType *)malloc(nMatchedTillNow*9*sizeof(RealType));
ErrorArr = (RealType *)malloc(nSpotsIndexed*3*sizeof(RealType));
int nJobGroups = nSpotsIndexed/(2*nCores) + 1;
int maxNJobs = 2*nCores;
int *nMatchedArr_d2;
int sizeNMatched = maxNJobs*(int)(((RealType)nMatchedTillNow/(RealType)nSpotsIndexed)*1.5);
int *tempNMatchedArr;
tempNMatchedArr = (int *)malloc(maxNJobs*3*sizeof(int));
RealType *scratchspace, *hklspace, *xspace, *xstepspace, *xlspace, *xuspace, *xoutspace,
*TheorSpotsArr, *SpotsMatchedArr_d2, *FitParams_d2, *CorrectSpots, *TheorSpotsCorr,
*FitResultArr, *FitResultArr_h, *LatCArr, *LatCIn_d2;
cudaMalloc((int **)&nMatchedArr_d2,maxNJobs*3*sizeof(RealType));
cudaMalloc((RealType **)&scratchspace,(3*maxNJobs+(maxNJobs+1)*(maxNJobs+1))*sizeof(RealType));
cudaMalloc((RealType **)&hklspace,maxNJobs*n_hkls_h*7*sizeof(RealType));
cudaMalloc((RealType **)&xspace,12*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&xstepspace,12*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&xlspace,12*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&xuspace,12*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&xoutspace,12*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&TheorSpotsArr,n_hkls_h*2*8*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&TheorSpotsCorr,n_hkls_h*2*8*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&nMatchedArr_d2,3*maxNJobs*sizeof(int));
cudaMalloc((RealType **)&SpotsMatchedArr_d2,sizeNMatched*9*sizeof(RealType));
cudaMalloc((RealType **)&CorrectSpots,sizeNMatched*6*sizeof(RealType));
cudaMalloc((RealType **)&FitParams_d2,12*maxNJobs*sizeof(RealType));
cudaMalloc((RealType **)&FitResultArr,12*maxNJobs*sizeof(RealType));
FitResultArr_h = (RealType *) malloc(maxNJobs*12*sizeof(RealType));
cudaMalloc((RealType **)&LatCIn_d2,maxNJobs*6*sizeof(RealType));
LatCArr = (RealType *) malloc(maxNJobs*6*sizeof(RealType));
RealType *hkls_dcorr, *SpCmp_d2, *Error_d2;
cudaMalloc((RealType **)&hkls_dcorr,maxNJobs*n_hkls_h*7*sizeof(RealType));
cudaMalloc((RealType **)&SpCmp_d2, sizeNMatched*22*sizeof(RealType));
cudaMalloc((RealType **)&Error_d2, maxNJobs*3*sizeof(RealType));
for (int jobNr=0;jobNr<nJobGroups;jobNr++){
printf("Optimization set: %d out of %d\n",jobNr,nJobGroups);
startRow = jobNr*maxNJobs;
endRow = (jobNr + 1 != nJobGroups) ? ((jobNr+1)*maxNJobs)-1 : ((nSpotsIndexed-1)%maxNJobs);
nrows = endRow - startRow + 1;
startRowNMatched = nMatchedArrIndexing[startRow*3+2];
endRowNMatched = nMatchedArrIndexing[(endRow)*3+2] + nMatchedArrIndexing[(endRow)*3];
nrowsNMatched = endRowNMatched - startRowNMatched;
n_arr_h[2] = nrows;
cudaMemcpy(n_arr,n_arr_h,3*sizeof(int),cudaMemcpyHostToDevice);
nSpotsMatched = 0;
for (int i=0;i<nrows;i++){
tempNMatchedArr[i*3] = nMatchedArrIndexing[(i+startRow)*3];
tempNMatchedArr[i*3+1] = nMatchedArrIndexing[(i+startRow)*3+1];
tempNMatchedArr[i*3+2] = nSpotsMatched;
nSpotsMatched += nMatchedArrIndexing[(i+startRow)*3];
}
printf("%d %d %d %d %d %d %d %d\n",nrows,nrowsNMatched,startRow,endRow, startRowNMatched, endRowNMatched, nSpotsIndexed, nMatchedTillNow);
cudaMemcpy(nMatchedArr_d2,tempNMatchedArr,3*nrows*sizeof(int),cudaMemcpyHostToDevice);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMemcpy(SpotsMatchedArr_d2,spotsYZO+startRowNMatched*9,nrowsNMatched*9*sizeof(RealType),cudaMemcpyHostToDevice);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMemcpy(FitParams_d2,FitParams_h+12*startRow,12*nrows*sizeof(RealType),cudaMemcpyHostToDevice);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
dim3 blockf (32);
dim3 gridf ((maxNJobs/blockf.x)+1);
// Call the optimization routines.
FitGrain<<<gridf,blockf>>>(RTParamArr,IntParamArr,n_arr,OmeBoxArr,
hkls_d, HKLints_d,nMatchedArr_d2,SpotsMatchedArr_d2,FitParams_d2,
TheorSpotsArr, scratchspace, hklspace, xspace, xlspace, xuspace,
xoutspace,xstepspace, CorrectSpots, TheorSpotsCorr, FitResultArr);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMemcpy(FitResultArr_h,FitResultArr,12*nrows*sizeof(RealType),cudaMemcpyDeviceToHost);
CalcAngleErrors<<<gridf,blockf>>>(RTParamArr,IntParamArr,n_arr,
OmeBoxArr,hkls_dcorr,nMatchedArr_d2,SpotsMatchedArr_d2,FitResultArr,
TheorSpotsArr,SpCmp_d2,Error_d2, hkls_d, HKLints_d);
CHECK(cudaPeekAtLastError());
CHECK(cudaDeviceSynchronize());
cudaMemcpy(SpotsCompReturnArr+22*startRowNMatched,SpCmp_d2,22*nrowsNMatched*sizeof(RealType),cudaMemcpyDeviceToHost);
cudaMemcpy(ErrorArr+3*startRow,Error_d2,3*nrows*sizeof(RealType),cudaMemcpyDeviceToHost);
printf("Finished one set of optimizations in: %lfseconds.\n",cpuSecond()-iStart);
}
// We have idsIndexed with the successful IDs.
// nMatchedArrIndexing to guide where to look
// SpotsCompReturnArr with the info about each matched spot,
// ErrorArr for errors and
// FitResultArr_h with all the fit parameters.
// First move spotIDsfn to a backup so that we don't overwrite this.
char cmd[MAX_LINE_LENGTH];
sprintf(cmd,"mv %s %s.orig",spotIDsfn,spotIDsfn);
system(cmd);
char outIDsfn[MAX_LINE_LENGTH];
sprintf(outIDsfn,"%s/SpotsToIndex.csv",folder);
char fitbestfn[MAX_LINE_LENGTH];
char opfitfn[MAX_LINE_LENGTH];
sprintf(fitbestfn,"%s/FitBest.bin",Parameters.ResultFolder);
FILE *fb;
fb = fopen(fitbestfn,"w");
fwrite(SpotsCompReturnArr,nMatchedTillNow*22*sizeof(RealType),1,fb);
sprintf(opfitfn,"%s/OrientPosFit.bin",Parameters.ResultFolder);
FILE *fo;
fo = fopen(opfitfn,"w");
FILE *outidsfile;
outidsfile = fopen(outIDsfn,"w");
RealType *OpArr;
OpArr = (RealType *) malloc(nSpotsIndexed*25*sizeof(RealType));
RealType OrientMat[3][3];
for (int i=0;i<nSpotsIndexed;i++){
fprintf(outidsfile,"%d\n",idsIndexed[i]);
OpArr[i*25+0] = (RealType) idsIndexed[i];
Euler2OrientMat_h(FitResultArr_h+i*12+3,OrientMat);
for (int j=0;j<3;j++){
for (int k=0;k<3;k++){
OpArr[i*25+1+j*3+k] = OrientMat[j][k];
}
OpArr[i*25+10+j] = FitResultArr_h[i*12+j];
OpArr[i*25+13+j] = FitResultArr_h[i*12+6+j];
OpArr[i*25+16+j] = FitResultArr_h[i*12+9+j];
OpArr[i*25+19+j] = ErrorArr[i*3+j];
OpArr[i*25+22+j] = (RealType)nMatchedArrIndexing[i*3+j];
}
printf("%lf %lf %lf\n",ErrorArr[i*3+0],ErrorArr[i*3+1],ErrorArr[i*3+2]);
}
fwrite(OpArr,25*nSpotsIndexed*sizeof(RealType),1,fo);
cudaDeviceReset();
printf("Time elapsed: %fs\n",cpuSecond()-iStart);
return 0;
}
|
21,855 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
/* Element-wise integer vector add: a[i] += b[i].
 * The sum is staged through shared memory purely for illustration — the
 * kernel would work identically writing straight back to global memory.
 * NOTE(review): the 64-entry staging buffer assumes blockDim.x <= 64
 * (the host launches with blocks of exactly 64 threads). */
__global__ void cuda_vector_add(int *a, int *b)
{
    __shared__ int results[64]; // demo staging buffer; not actually required
    const int lid = threadIdx.x;
    const int gid = blockDim.x * blockIdx.x + lid;
    results[lid] = a[gid] + b[gid];
    __syncthreads(); // make all staged sums visible before write-back
    a[gid] = results[lid];
}
/* Driver: GPU integer vector add, verified against a host-side reference.
 * Usage: ./prog <vector_length>  (length is rounded up to a multiple of 64)
 * Fixes: validate argc/argv before use; size computation in size_t so large
 * lengths do not overflow int; redundant cudaDeviceSynchronize() after the
 * blocking device-to-host copy removed. */
int main(int argc, char **argv)
{
    /* Fix: the original read argv[1] without checking argc. */
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <vector_length>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    if (n <= 0) {
        fprintf(stderr, "Error: vector length must be a positive integer\n");
        return 1;
    }
    n = (n + 63) / 64 * 64;                   /* round up to the block size (64) */
    size_t nBytes = sizeof(int) * (size_t)n;  /* size_t avoids int overflow */
    printf("Vector add, length = %d\n", n);
    int *h_a, *h_b, *d_a, *d_b;
    /* Allocate memory on host */
    h_a = (int*) malloc(nBytes);
    h_b = (int*) malloc(nBytes);
    assert(h_a != NULL && h_b != NULL);
    /* Allocate memory on device */
    cudaMalloc((void**) &d_a, nBytes);
    cudaMalloc((void**) &d_b, nBytes);
    /* Init data on host */
    for (int i = 0; i < n; i++)
    {
        h_a[i] = 114 + i;
        h_b[i] = 514 - i;
    }
    /* Copy data to device */
    cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice);
    /* Launch kernel: n is a multiple of 64, so the grid divides evenly */
    dim3 block(64);
    dim3 grid(n / block.x);
    cuda_vector_add<<<grid, block>>>(d_a, d_b);
    assert(cudaGetLastError() == cudaSuccess); /* catch bad launch config */
    /* Generate the reference result on host */
    for (int i = 0; i < n; i++) h_b[i] += h_a[i];
    /* Blocking copy; on the default stream this also waits for the kernel,
     * so no separate cudaDeviceSynchronize() is needed. */
    cudaMemcpy(h_a, d_a, nBytes, cudaMemcpyDeviceToHost);
    /* Check the results */
    for (int i = 0; i < n; i++) assert(h_a[i] == h_b[i]);
    printf("Result is correct.\n");
    /* Free host memory */
    free(h_a);
    free(h_b);
    /* Free device memory */
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
21,856 | #include "includes.h"
/* Element-wise vector addition: ab[N] = a[N] + b[N] for every N < entries.
 * One thread per element; the bounds check handles grids that overshoot. */
__global__ void addVectors(const int entries, const float *a, const float *b, float *ab){
    /* Fix: use the launch-time block size instead of a hard-coded 16, so the
     * kernel is correct for any block size (identical behaviour when the
     * host launches with blockDim.x == 16, as the original assumed). */
    const int N = threadIdx.x + (blockDim.x * blockIdx.x);
    if(N < entries)
        ab[N] = a[N] + b[N];
}
21,857 | /* Memocode design
* hash-align.cu
 * Uses a static hash table structure stored in hash_table1.bin and
* hash_table2.bin, based on 24-bit binary strings from the supplied
* genome_file, and performs alignment on the sequence file.
* Sample usage:
*
* ./align human_g1k_v37.bin ERR050082.filt.bin 100 machine_number 0 9999
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>
#define HASH_LENGTH 3
#define TABLE_LENGTH 16777216
/* Assumed alignment for sequences in the sequence file, in bytes */
#define SEQUENCE_ALIGN 8
#define TICKS_PER_SEC 2795377101
/*2048 thread blocks of 32 threads each*/
#define NUM_BLOCKS 2048
#define NUM_THREADS 32
typedef unsigned long long ticks;
/* Result of matching a single sequence- Each entry is 8 bytes; */
/* Result of matching a single read sequence against the reference.
 * Each entry is 8 bytes; one entry per read, indexed by read number. */
struct match {
    uint32_t position; /* Reference position of the most recently found match; only meaningful if count > 0 */
    uint32_t count; /* Number of matches found for this read */
};
/* Read the x86 time-stamp counter as a 64-bit tick count.
 * The leading CPUID is the classic serializing instruction that keeps RDTSC
 * from being reordered ahead of earlier work.
 * Fix: the original `asm("cpuid")` declared no clobbers, so the compiler was
 * free to assume eax/ebx/ecx/edx survived it; both statements are now
 * volatile with an explicit clobber list. */
static __inline__ ticks getticks(void)
{
    unsigned a, d;
    asm volatile("cpuid" : : : "%eax", "%ebx", "%ecx", "%edx");
    asm volatile("rdtsc" : "=a" (a), "=d" (d));
    return (((ticks)a) | (((ticks)d) << 32));
}
/* Align one batch of 65536 read sequences against the reference genome.
 * Launched as <<<NUM_BLOCKS, NUM_THREADS>>> (2048 x 32 = 65536 threads); one
 * thread handles one read, and `kernel` selects which batch of 65536 reads
 * in read_seq_GPU / result_GPU this launch covers.
 * Each read is 25 packed bytes (2 bits per base pair). The first HASH_LENGTH
 * (3) bytes form a 24-bit hash into table 1; table 1 yields an index range
 * into table 2, whose entries are candidate base-pair positions in the
 * reference; each candidate is verified byte-by-byte against the packed
 * reference genome. */
__global__ void matchKernel ( char *ref_genome_GPU, /*Base pointer of reference genome*/
                              char *read_seq_GPU, /*Base pointer of read sequence*/
                              struct match *result_GPU, /*Base pointer of match array*/
                              uint32_t *GPU_table_1_base, /*Base pointer of hash_table_1*/
                              uint32_t *GPU_table_2_base, /*Base pointer of hash_table_2*/
                              uint32_t table_1_start, /*Index of table 1 that stores first table 2 index*/
                              uint32_t table_1_end, /*Index of table 1 that stores last table 2 index*/
                              uint32_t kernel, /*CUDA kernel number*/
                              uint32_t start_table_2, /*First index of table 2 for this machine*/
                              uint32_t table_1_total, /*Size of table 1 in bytes*/
                              uint32_t table_2_total) /*Size of table 2 in bytes*/
{
    uint8_t k,cur_match,aligner,ref_byte_1,ref_byte_2,seq_byte,flag=0;
    uint32_t i,hash_value,start_ref_index, end_ref_index, ref_index;
    char *cur_refs;
    char cur_seq[25]; /* one read, staged out of global memory */
    /* Zero this thread's result slot before matching. */
    result_GPU[(65536*kernel)+(blockIdx.x*NUM_THREADS)+threadIdx.x].count = 0;
    result_GPU[(65536*kernel)+(blockIdx.x*NUM_THREADS)+threadIdx.x].position = 0;
    /*Copy read sequence from global memory to internal registers*/
    /* Reads are stored on a 32-byte stride (25 data bytes + alignment pad). */
    for(i=0;i<25;i++)
        cur_seq[i]= read_seq_GPU[(65536*kernel+blockIdx.x*NUM_THREADS+threadIdx.x)*32 + i];
    /*If it is the last index of table 1, we want to compare till end of table 2*/
    if(table_1_end+1 == table_1_total/4)flag=1;
    /* Loop through the sequence array */
    /* Build the 24-bit prefix hash from the first HASH_LENGTH bytes.
     * The & 0xFF masks off sign extension since char may be signed. */
    hash_value = 0;
    for (k = 0; k < HASH_LENGTH; k++) {
        hash_value |= (cur_seq[k]& 0x000000FF) << ((HASH_LENGTH-k-1)*8);
    }
    /*Process only if the prefix exists for the block of table 2 present in this machine*/
    if(hash_value>=table_1_start && hash_value < table_1_end+flag)
    {
        /*Indices of the possible entries in table 2 that correspond to the 3 byte prefix*/
        start_ref_index = GPU_table_1_base[hash_value];
        if(flag==1){
            end_ref_index = table_2_total/4;
        }else{
            end_ref_index = GPU_table_1_base[hash_value+1];
        }
        for (i = start_ref_index; i < end_ref_index; i++) {
            /* table 2 on this machine is offset by its starting index */
            ref_index = GPU_table_2_base[i-start_table_2];
            /*Check if the base pair position lies at byte boundary */
            /* aligner = bit offset (0/2/4/6) of the candidate within its byte */
            aligner = (ref_index%4)*2;
            cur_refs = ref_genome_GPU + ref_index/4;
            cur_match = 1;
            for (k = 0; k < 25; k++) {
                seq_byte = cur_seq[k];
                ref_byte_1 = cur_refs[k];
                ref_byte_2 = cur_refs[k+1];
                /* Splice one aligned reference byte out of two adjacent packed
                 * bytes. Note: in "<< 8-aligner" the subtraction binds before
                 * the shift, so the shift amount is (8-aligner) as intended;
                 * when aligner==0, 0xFF>>8 == 0 and only ref_byte_1 is used. */
                ref_byte_1 = ref_byte_1 >> aligner;
                ref_byte_1 = ref_byte_1 & (0xFF >> aligner);
                ref_byte_1 = ref_byte_1 | ((ref_byte_2 & (0xFF >> (8-aligner))) << 8-aligner);
                if (seq_byte != ref_byte_1) {
                    cur_match = 0;
                    break;
                }
            }
            if (cur_match == 1) {
                /* Count every hit; position keeps only the most recent one. */
                result_GPU[65536*kernel+blockIdx.x*NUM_THREADS+threadIdx.x].count++;
                result_GPU[65536*kernel+blockIdx.x*NUM_THREADS+threadIdx.x].position = ref_index;
            }
        }
    }
}
/* One kernel processes 65536 read sequences. When the number of reads is less than 65536, or (number of reads) % 65536 != 0, we use kernel 2 for the remaining reads. It is exactly the same as the previous kernel, except that it uses one thread block per read. */
/* Remainder variant of matchKernel: one thread block (of 1 thread) per read,
 * so blockIdx.x — not the thread index — selects the read within the batch.
 * Used when fewer than 65536 reads remain. The candidate lookup is identical
 * to matchKernel; the verify loop starts at HASH_LENGTH instead of 0,
 * presumably because all table-2 candidates for a hash share the same 3-byte
 * prefix — TODO confirm this matches the table construction. */
__global__ void matchKernel2 ( char *ref_genome_GPU,
                               char *read_seq_GPU,
                               struct match *result_GPU,
                               uint32_t *GPU_table_1_base,
                               uint32_t *GPU_table_2_base,
                               uint32_t table_1_start,
                               uint32_t table_1_end,
                               uint32_t kernel,
                               uint32_t start_table_2,
                               uint32_t table_1_total,
                               uint32_t table_2_total)
{
    uint8_t k,cur_match,aligner,ref_byte_1,ref_byte_2,seq_byte,flag=0;
    uint32_t i,hash_value,start_ref_index, end_ref_index, ref_index;
    char *cur_refs;
    char cur_seq[25]; /* one read, staged out of global memory */
    /* Zero this read's result slot before matching. */
    result_GPU[(65536*kernel)+blockIdx.x].count = 0;
    result_GPU[(65536*kernel)+blockIdx.x].position = 0;
    /*Copy read sequence from global memory to internal registers*/
    /* Reads are stored on a 32-byte stride (25 data bytes + alignment pad). */
    for(i=0;i<25;i++)
        cur_seq[i]= read_seq_GPU[(65536*kernel+blockIdx.x)*32 + i];
    /*If it is the last index of table 1, we want to compare till end of table 2*/
    if(table_1_end+1 == table_1_total/4)flag=1;
    /* Loop through the sequence array */
    /* Build the 24-bit prefix hash; & 0xFF strips char sign extension. */
    hash_value = 0;
    for (k = 0; k < HASH_LENGTH; k++) {
        cur_seq[k] = cur_seq[k] ; /* no-op self-assignment, kept as in original */
        hash_value |= (cur_seq[k]& 0x000000FF) << ((HASH_LENGTH-k-1)*8);
    }
    /*Process only if the prefix exists for the block of table 2 present in this machine*/
    if(hash_value>=table_1_start && hash_value < table_1_end+flag)
    {
        /*Indices of the possible entries in table 2 that correspond to the 3 byte prefix*/
        start_ref_index = GPU_table_1_base[hash_value];
        if(flag==1){
            end_ref_index = table_2_total/4;
        }else{
            end_ref_index = GPU_table_1_base[hash_value+1];
        }
        for (i = start_ref_index; i < end_ref_index; i++) {
            /* table 2 on this machine is offset by its starting index */
            ref_index = GPU_table_2_base[i-start_table_2];
            /*Check if the base pair position lies at byte boundary */
            aligner = (ref_index%4)*2;
            cur_refs = ref_genome_GPU + ref_index/4;;
            cur_match = 1;
            /* Skips the first HASH_LENGTH bytes (see header note). */
            for (k = HASH_LENGTH; k < 25; k++) {
                seq_byte = cur_seq[k];
                ref_byte_1 = cur_refs[k];
                ref_byte_2 = cur_refs[k+1];
                /* Same two-byte splice as matchKernel; "8-aligner" binds
                 * before "<<" so the shift amount is (8-aligner). */
                ref_byte_1 = ref_byte_1 >> aligner;
                ref_byte_1 = ref_byte_1 & (0xFF >> aligner);
                ref_byte_1 = ref_byte_1 | ((ref_byte_2 & (0xFF >> (8-aligner))) << 8-aligner);
                if (seq_byte != ref_byte_1) {
                    cur_match = 0;
                    break;
                }
            }
            if (cur_match == 1) {
                /* Count every hit; position keeps only the most recent one. */
                result_GPU[65536*kernel+blockIdx.x].count++;
                result_GPU[65536*kernel+blockIdx.x].position = ref_index;
            }
        }
    }
}
/* Attempt to match an array of two-bit sequences (*sequences)
* against a reference sequence (*reference_base).
*
* Fill in the *matches array with the results.
*
* This allocates a buffer large enough to hold a single sequence
* and shifts a portion of the reference sequence through it, based on
* the starting indexes pointed to by hash_table1.
* At each point, this buffer is compared to each of the given sequences.
*/
/* Attempt to match an array of two-bit sequences (*sequences)
 * against a reference sequence (*reference_base).
 *
 * Fill in the *matches array with the results.
 *
 * Copies the reference genome, this machine's slice of the hash tables and
 * the read sequences to the GPU, launches matchKernel once per batch of
 * 65536 reads plus matchKernel2 for any remainder, and copies the per-read
 * results back to the host.
 *
 * Fixes relative to the original:
 *  - printf format specifiers now match the argument types
 *    (uint32_t -> %u, ticks/unsigned long long -> %llu)
 *  - the full device-to-host result copy inside the launch loop was removed;
 *    the unconditional copy after the last launch fetches everything
 *  - matchKernel2 is only launched when a remainder exists (a grid dimension
 *    of 0 is an invalid launch configuration)
 *  - the batch counter is 32-bit; the original uint8_t wrapped after 255
 *    batches (~16.7M reads)
 */
void match(struct match *matches, /* Output array, one per sequence */
           uint32_t *table1_base, /* Beginning of hash_table1 */
           off_t table1_total, /* Number of bytes in hash_table1 */
           uint32_t *table2_base, /* Beginning of hash_table2 */
           off_t table2_total, /* Number of bytes in hash_table2 */
           char *reference_base, /* Beginning of reference sequence */
           off_t reference_total, /* Number of bytes in the reference */
           char *sequences, /* Beginning of sequences */
           int32_t start_sequence, /* Index of first sequence in array */
           int32_t sequence_length, /* Base pairs per sequence */
           int32_t end_sequence, /* One more than index of last sequence */
           uint8_t machine )
{
    uint32_t i, no_of_kernels = 0; /* 32-bit: >255 batches must not wrap */
    uint32_t temp=0,start_ref_index=0, end_ref_index,last_kernel=0,start_table_1=0;
    uint32_t *GPU_table_1_base,*GPU_table_2_base,*table_2_start;
    char *ref_genome_GPU;
    char *read_seq_GPU;
    struct match *result_GPU;
    char *read_sequence;
    dim3 dimBlock(NUM_THREADS,1,1);
    dim3 dimGrid(NUM_BLOCKS,1,1);
    ticks time1,time2,time3;
    double elapsed;
    /* Bytes per sequence, padded to be a multiple of SEQUENCE_ALIGN */
    int sequence_bytes = ((sequence_length + SEQUENCE_ALIGN * 4 - 1) &
                          ~(SEQUENCE_ALIGN*4 - 1)) >> 2;
    read_sequence = sequences + sequence_bytes*start_sequence;
    /*Start index of table 1 depends on the machine(1-23) executing the code*/
    switch(machine)
    {
        case 1 : start_table_1 = 0;
            break;
        case 2 : start_table_1 = 324703;
            break;
        case 3 : start_table_1 = 915552;
            break;
        case 4 : start_table_1 = 1499762;
            break;
        case 5 : start_table_1 = 2138400;
            break;
        case 6 : start_table_1 = 2985073;
            break;
        case 7 : start_table_1 = 3652892;
            break;
        case 8 : start_table_1 = 4358573;
            break;
        case 9 : start_table_1 = 5311958;
            break;
        case 10 : start_table_1 = 6225932;
            break;
        case 11 : start_table_1 = 7269752;
            break;
        case 12 : start_table_1 = 8126309;
            break;
        case 13 : start_table_1 = 8701463;
            break;
        case 14 : start_table_1 = 9393143;
            break;
        case 15 : start_table_1 = 11240046;
            break;
        case 16 : start_table_1 = 11862667;
            break;
        case 17 : start_table_1 = 12525596;
            break;
        case 18 : start_table_1 = 13032273;
            break;
        case 19 : start_table_1 = 13687336;
            break;
        case 20 : start_table_1 = 14414758;
            break;
        case 21 : start_table_1 = 15248491;
            break;
        case 22 : start_table_1 = 15872775;
            break;
        case 23 : start_table_1 = 16503085;
    }
    printf("Machine :%d\n ", machine);
    /*Calculate the start and end indices for this machine*/
    start_ref_index= table1_base[start_table_1];
    /*Take in table 2 entries until size > 500 MB or end of table_1 is reached*/
    temp = start_table_1;
    while((table1_base[temp]-table1_base[start_table_1]<125000000) && temp!= table1_total/4)temp++;
    if(temp == table1_total/4){
        end_ref_index=table2_total/4;
    }else{
        end_ref_index=table1_base[temp-1];
    }
    table_2_start = table2_base + start_ref_index;
    /*Allocate space on GPU RAM for reference genome, results and hash_tables*/
    cudaMalloc((void**) &ref_genome_GPU, reference_total);
    cudaMalloc((void**) &result_GPU, (end_sequence-start_sequence)*sizeof(struct match));
    cudaMalloc((void**) &GPU_table_1_base,table1_total);
    cudaMalloc((void**) &GPU_table_2_base,4*(end_ref_index-start_ref_index) );
    /*Copy data from Host to GPU RAM*/
    cudaMemcpy(ref_genome_GPU,reference_base,reference_total,cudaMemcpyHostToDevice);
    cudaMemcpy(GPU_table_1_base,table1_base,table1_total,cudaMemcpyHostToDevice);
    cudaMemcpy(GPU_table_2_base,table_2_start,4*(end_ref_index-start_ref_index),cudaMemcpyHostToDevice);
    /*Start clock*/
    time1=getticks();
    /*Allocate space for read sequences and copy them into the GPU RAM*/
    cudaMalloc((void**) &read_seq_GPU, (end_sequence-start_sequence)*sequence_bytes);
    cudaMemcpy(read_seq_GPU,read_sequence,(end_sequence-start_sequence)*sequence_bytes,cudaMemcpyHostToDevice);
    if(end_sequence-start_sequence > 65535){
        no_of_kernels = (end_sequence-start_sequence)/65536;
        last_kernel = (end_sequence-start_sequence)%65536;
        for(i=0;i<no_of_kernels;i++){
            /*Kernel Call — one launch per full batch of 65536 reads; the
             * results are fetched once after all launches below.*/
            matchKernel<<<dimGrid,dimBlock>>>
                (ref_genome_GPU,read_seq_GPU,result_GPU,GPU_table_1_base,GPU_table_2_base,
                 start_table_1,temp-1,i,start_ref_index,table1_total,table2_total);
        }
        /*Process the remaining [(Total read sequences)%65536] read sequences.
         * Guarded: launching with a grid dimension of 0 is invalid.*/
        if (last_kernel > 0)
            matchKernel2<<<last_kernel,1>>>
                (ref_genome_GPU,read_seq_GPU,result_GPU,GPU_table_1_base,GPU_table_2_base,
                 start_table_1,temp-1,no_of_kernels,start_ref_index,table1_total,table2_total);
        /*Copy the results back to Host. This blocking call also makes sure that all threads writing to the result array have finished*/
        cudaMemcpy(matches,result_GPU,sizeof(struct match)*(end_sequence-start_sequence),cudaMemcpyDeviceToHost);
    }
    else{
        /*If total reads < 65536, we do not need multiple kernels*/
        matchKernel2<<<(end_sequence-start_sequence),1>>>
            (ref_genome_GPU,read_seq_GPU,result_GPU,GPU_table_1_base,GPU_table_2_base,
             start_table_1,temp-1,0,start_ref_index,table1_total,table2_total);
        /*Copy the results back to Host. This blocking call also makes sure that all threads writing to the result array have finished*/
        cudaMemcpy(matches,result_GPU,sizeof(struct match)*(end_sequence-start_sequence),cudaMemcpyDeviceToHost);
    }
    time2 = getticks();
    /* Report: one line per read, its position (plus extra-hit count) or '-'. */
    for (temp = start_sequence ; temp != end_sequence ; ++temp) {
        printf("%9u: ", temp);
        if (matches[temp - start_sequence].count) {
            printf("%9u", matches[temp - start_sequence].position);
            if (matches[temp-start_sequence].count > 1)
                printf(" + %u others", matches[temp-start_sequence].count - 1);
            printf("\n");
        } else
            printf("-\n");
    }
    time3 = getticks();
    /* ticks is unsigned long long, hence %llu. */
    printf("Clock tick value at start: %llu\n",time1);
    printf("Clock tick value at end of computation: %llu\n",time2);
    printf("Clock tick value at end of print: %llu\n",time3);
    elapsed = (double)(time2 - time1) / TICKS_PER_SEC;
    printf("Time taken for computation: %f seconds \n",elapsed);
    elapsed = (double)(time3 - time2) / TICKS_PER_SEC;
    printf("Time taken for printing: %f seconds \n",elapsed);
    elapsed = (double)(time3 - time1) / TICKS_PER_SEC;
    printf("Total time: %f seconds \n",elapsed);
    cudaFree(ref_genome_GPU);
    cudaFree(read_seq_GPU);
    cudaFree(result_GPU);
    cudaFree(GPU_table_1_base);
    cudaFree(GPU_table_2_base);
}
/*
* Process command-line arguments, map reference and sequence data into
* memory, call match(), and print the results
*/
int main(int argc, const char *argv[]) {
const char *reference_filename, *sequence_filename,*machine_p;
int32_t reference_fd = -1, sequence_fd = -1;
int32_t table1_fd = -1, table2_fd = -1;
int sequence_length, sequence_bytes, start_sequence = -1, end_sequence = -1,num_sequences, i;
off_t reference_total, sequence_total, sequence_window_offset;
off_t table1_total, table2_total;
size_t sequence_window_length;
long page_size;
void *reference_base, *sequence_base, *sequences;
void *table1_base, *table2_base;
struct stat file_status;
struct match *matches;
uint8_t machine;
char machine_s[2];
page_size = sysconf(_SC_PAGE_SIZE); /* needed for mmap */
if (argc < 5) goto usage;
reference_filename = argv[1];
sequence_filename = argv[2];
sequence_length = atoi(argv[3]);
machine_p = argv[4];
strcpy(machine_s,machine_p+7);
machine = atoi(machine_s);
if (machine <= 0 || machine > 23) {
fprintf(stderr,
"Error: Machine number must be an integer between 1 and 23\n");
goto usage;
}
/* Pad the sequences out to a multiple of SEQUENCE_ALIGN bytes */
sequence_bytes = ((sequence_length + SEQUENCE_ALIGN * 4 - 1) &
~(SEQUENCE_ALIGN * 4 - 1)) >> 2;
if (sequence_length <= 0) {
fprintf(stderr,
"Error: given sequence length must be an integer greater than zero\n");
goto usage;
}
if ((reference_fd = open(reference_filename, O_RDONLY)) < 0) {
fprintf(stderr, "Error opening reference file \"%s\": ",
reference_filename);
perror((const char *) 0);
goto usage;
}
if (fstat(reference_fd, &file_status)) {
fprintf(stderr, "Error checking reference file \"%s\": ",
reference_filename);
perror((const char *) 0);
goto usage;
}
reference_total = file_status.st_size;
if (reference_total < sequence_bytes) {
fprintf(stderr, "Error: reference file is shorter than the given sequence length (%d)\n", sequence_length);
goto usage;
}
if ((sequence_fd = open(sequence_filename, O_RDONLY)) < 0) {
fprintf(stderr, "Error opening sequence file \"%s\": ", sequence_filename);
perror((const char *) 0);
goto usage;
}
if (fstat(sequence_fd, &file_status)) {
fprintf(stderr, "Error checking sequence file \"%s\": ",
sequence_filename);
perror((const char *) 0);
goto usage;
}
sequence_total = file_status.st_size;
if (sequence_total < sequence_bytes) {
fprintf(stderr, "Sequence file is too small\n");
goto usage;
}
if (sequence_total % sequence_bytes != 0)
fprintf(stderr, "Warning: sequence file may be truncated\n");
num_sequences = sequence_total / sequence_bytes;
if (argc > 5) start_sequence = atoi(argv[5]);
if (start_sequence < 0) start_sequence = 0;
if (start_sequence >= num_sequences) {
fprintf(stderr, "Error: initial sequence number must be less than %d\n",
num_sequences);
goto usage;
}
if (argc > 6) end_sequence = atoi(argv[6]);
else end_sequence = num_sequences;
if (end_sequence < start_sequence || end_sequence > num_sequences) {
fprintf(stderr, "Error: End sequence number must be between %d and %d\n",
start_sequence, num_sequences);
goto closeexit;
}
/* Open the hash table files */
if ((table1_fd = open("hash_table1.bin", O_RDONLY)) < 0) {
fprintf(stderr, "Error opening hash_table1.bin file");
perror((const char *) 0);
goto usage;
}
if (fstat(table1_fd, &file_status)) {
fprintf(stderr, "Error checking hash_table1.bin file");
perror((const char *) 0);
goto usage;
}
table1_total = file_status.st_size;
/* Open the hash table files */
if ((table2_fd = open("hash_table2.bin", O_RDONLY)) < 0) {
fprintf(stderr, "Error opening hash_table2.bin file");
perror((const char *) 0);
goto usage;
}
if (fstat(table2_fd, &file_status)) {
fprintf(stderr, "Error checking hash_table2.bin file");
perror((const char *) 0);
goto usage;
}
table2_total = file_status.st_size;
/* mmap the reference data */
reference_base = mmap( (void *) 0, reference_total, PROT_READ, MAP_SHARED,
reference_fd, 0);
if (reference_base == MAP_FAILED) {
perror("Error when attempting to map the reference file");
goto unmap_references;
}
/* mmap the hash_table data */
table1_base = mmap( (void *) 0, table1_total, PROT_READ, MAP_SHARED,
table1_fd, 0);
if (table1_base == MAP_FAILED) {
perror("Error when attempting to map the hash_table1 file");
goto unmap_table1;
}
/* mmap the hash_table data */
table2_base = mmap( (void *) 0, table2_total, PROT_READ, MAP_SHARED,
table2_fd, 0);
if (table2_base == MAP_FAILED) {
perror("Error when attempting to map the hash_table1 file");
goto unmap_table2;
}
/* mmap the sequence data */
/* compute the starting location by rounding down to the nearest
page boundary; window length is the difference between this and the last
page on which the sequences fall */
sequence_window_offset = (start_sequence * sequence_bytes) & ~(page_size - 1);
sequence_window_length = (((end_sequence * sequence_bytes) + (page_size - 1))
& ~(page_size - 1)) - sequence_window_offset;
sequence_base = mmap( (void *) 0, sequence_window_length, PROT_READ,
MAP_SHARED, sequence_fd, sequence_window_offset);
if (sequence_base == MAP_FAILED) {
perror("Error when attempting to map the sequence file");
goto closeexit;
}
sequences = (char *)sequence_base +
((start_sequence * sequence_bytes) - sequence_window_offset);
/* Allocate space to hold the results of matching */
matches = (struct match *)malloc(sizeof(struct match)*(end_sequence-start_sequence));
if (matches == NULL) {
fprintf(stderr, "Failed to allocate memory for match information\n");
goto unmap_sequences;
}
/*Initialize the output array to a random value to understand if the kernel crashed*/
for ( i = 0 ; i < end_sequence - start_sequence ; i++ )
matches[i].count = 13;
/* Call the sequence matcher with the starting address of the reference,
starting address of the sequences, the ending address, the number
of pairs in the sequence, and the length of the reference */
match(matches,
(uint32_t *)table1_base, table1_total,
(uint32_t *)table2_base, table2_total,
(char*)reference_base, reference_total,
(char*)sequences, start_sequence, sequence_length, end_sequence,machine);
/* Report all matches */
free(matches);
unmap_sequences:
if (munmap(sequence_base, sequence_window_length)) {
perror("Error when unmapping the sequence file");
goto closeexit;
}
unmap_references:
if (munmap(reference_base, reference_total)) {
perror("Error when unmapping the reference file");
goto closeexit;
}
unmap_table1:
if (munmap(table1_base, table1_total)) {
perror("Error when unmapping the hash_table1 file");
goto closeexit;
}
unmap_table2:
if (munmap(table2_base, table2_total)) {
perror("Error when unmapping the hash_table2 file");
goto closeexit;
}
close(sequence_fd);
close(reference_fd);
close(table1_fd);
close(table2_fd);
return 0;
usage:
fprintf(stderr,
"usage: align <reference-genome> <sequence-file> <sequence-length> <machine-number> <start> <end>\n"
"<reference-genome> is the name of a packed binary reference sequence.\n"
"<sequence-file> is the name of a packed binary sequence file.\n"
"<sequence-length> is an integer indicating the length, in base pairs, of each sequence.\n"
"<machine-number> is an integer specifying the machine. Value should be between 1 and 23"
"<start> is the optional starting sequence number. If omitted, it defaults to the start of the sequence file.\n"
"<end> is the optional ending sequence number. If omitted, it defaults to the end of the sequence file.\n");
closeexit:
if (reference_fd >= 0) close(reference_fd);
if (sequence_fd >= 0) close(sequence_fd);
return 1;
}
|
21,858 | #include <cuda.h>
#include <stdio.h>
#define THREADS 16
#define BLOCKS 8
/* For each element idx in [0, *size], sum the element with its ring
   neighbours (idx+1 mod size and idx-1 wrapping to size-1) and store the
   result back in place.
   Fix: the original early-returned before __syncthreads(), so threads past
   the bound skipped a barrier the rest of the block waited on — undefined
   behaviour.  All threads now reach the barrier; inactive threads just do
   no memory work.  (The inclusive bound idx <= *size is preserved; the host
   allocates size+2 ints, so the extra element stays in bounds.) */
__global__ void __add__(int *array, int *size) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int n = *size;
    bool active = (idx <= n);   /* same range the original processed */
    int temp = 0;
    if (active) {
        int before = (idx + 1) % n;
        int after = (idx == 0) ? n - 1 : idx - 1;
        temp = array[idx] + array[before] + array[after];
    }
    __syncthreads();            /* every thread reaches the barrier: reads finish before writes */
    if (active)
        array[idx] = temp;
}
/* Host wrapper: copy h_a (size+2 ints, caller-padded) to the device, run
   the ring-sum kernel with the fixed BLOCKS x THREADS configuration, and
   copy the result back in place. */
extern "C" void call_device_sum(int *h_a, int size)
{
    int *d_arr = NULL;
    int *d_size = NULL;
    const size_t bytes = (size + 2) * sizeof(int);

    cudaMalloc(&d_size, sizeof(int));
    cudaMemset(d_size, 0, sizeof(int));
    cudaMemcpy(d_size, &size, sizeof(int), cudaMemcpyHostToDevice);

    cudaMalloc(&d_arr, bytes);
    cudaMemset(d_arr, 0, bytes);
    cudaMemcpy(d_arr, h_a, bytes, cudaMemcpyHostToDevice);

    __add__ <<<BLOCKS, THREADS>>>(d_arr, d_size);

    /* Blocking copy back to the host; also waits for the kernel. */
    cudaMemcpy(h_a, d_arr, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_arr);
    cudaFree(d_size);
}
|
21,859 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
/* Element-wise vector add, one thread per element.  Intended for a
   single-block launch where the thread index is the element index; the
   caller must launch exactly as many threads as there are elements. */
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
int main() {
    /* Build a synthetic 1920x1080 image with a repeating (col+row) % 256
       gradient, a 3x3 kernel with entries -1/0/1, and print the image's
       top-left 10x10 corner. */
    const int width = 1920;
    const int height = 1080;
    float* img = new float[width * height];
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++) {
            img[row * width + col] = (col + row) % 256;
        }
    }
    const int kernelSize = 3;
    float* kernel = new float[kernelSize * kernelSize];
    for (int i = 0; i < kernelSize * kernelSize; i++)
    {
        kernel[i] = i % kernelSize - 1;  /* pattern -1, 0, 1 per row */
    }
    //visualization
    for (int row = 0; row < 10; row++)
    {
        for (int col = 0; col < 10; col++) {
            printf("%2.0f", img[row * width + col]);
        }
        printf("\n");
    }
    /* Fix: release the heap buffers (they were leaked before). */
    delete[] kernel;
    delete[] img;
    return 0;
}
21,860 | #include <stdio.h>
#include <stdlib.h>
#define n 4
/* Crout LU decomposition of the n x n matrix A into D, which stores U on
   and above the diagonal and the multipliers of L below it (L's unit
   diagonal is implicit).  On a zero pivot it prints a message ("cannot
   divide by 0") and aborts the factorization, leaving the remaining
   entries of D unset. */
__device__
void dekomposisi(double A[][n], double D[][n]) {
    int i, j, k, p, q, stop = 0;
    double sum = 0;
    for (p = 0; p < n; p++) {
        /* Row p of U: U[p][j] = A[p][j] - sum_k L[p][k]*U[k][j]. */
        for (j = p; j < n; j++) {
            sum = 0;
            for (k = 0; k < p; k++) {
                sum += D[p][k] * D[k][j];
            }
            D[p][j] = A[p][j] - sum;
        }
        q = p;
        /* Column q of L: L[i][q] = (A[i][q] - sum_k L[i][k]*U[k][q]) / U[q][q]. */
        for (i = q + 1; i < n; i++) {
            sum = 0;
            for(k = 0; k < q; k++) {
                sum += D[i][k] * D[k][q];
            }
            if (D[q][q] == 0) {
                /* Zero pivot: division impossible, abort early. */
                printf("U[%d][%d] == 0\n Tidak bisa dibagi 0...\n", q, q);
                stop = 1;
            } else {
                D[i][q] = (A[i][q] - sum) / D[q][q];
            }
            if (stop) {
                break;
            }
        }
        if (stop) {
            break;
        }
    }
}
/* Solve A*x = b using the combined LU factors in D:
   forward substitution L*y = b (unit diagonal of L implicit), then back
   substitution U*x = y.  y is caller-provided scratch of n doubles. */
__device__
void sulih(double D[][n], double b[n], double x[n], double *y) {
    int i, j;
    double sum;
    for (i = 0; i < n; i++) {
        sum = 0;
        for (j = 0; j < i; j++) {
            sum += y[j] * D[i][j];
        }
        y[i] = b[i] - sum;
    }
    for (i = n-1; i >= 0; i--) {
        sum = 0;
        for (j = i+1; j < n; j++) {
            sum += x[j] * D[i][j];
        }
        x[i] = (y[i] - sum) / D[i][i];
    }
    /* Fix: removed `free(y)`.  y is allocated on the host with cudaMalloc,
       so calling device-side free() on it is undefined behaviour; the host
       owns the buffer and must release it with cudaFree. */
}
/* Pretty-print the combined LU factors stored in D: first L (strict lower
   triangle plus a printed unit diagonal), then U (upper triangle). */
__device__
void print_LU(double D[][n]) {
    printf("Dekomposisi\n");
    printf("L =\n");
    for (int r = 0; r < n; r++) {
        printf("\t");
        for (int c = 0; c < r; c++)
            printf("%8.4f ", D[r][c]);
        printf("%8d\n", 1);   /* implicit unit diagonal of L */
    }
    printf("U =\n");
    for (int r = 0; r < n; r++) {
        printf("\t");
        for (int c = 0; c < r; c++)
            printf("%8s ", "");   /* left-pad below the diagonal */
        for (int c = r; c < n; c++)
            printf("%8.4f ", D[r][c]);
        printf("\n");
    }
}
/* Print the solution vector, one component per line. */
__device__
void print_x(double x[n]) {
    printf("Solusi\n");
    printf("x =\n");
    for (int k = 0; k < n; k++)
        printf("\t%8.4f\n", x[k]);
}
/* Single-thread demo kernel: factor the hard-coded 4x4 system A*x = b with
   Crout LU, print the factors and the solution.  The commented-out arrays
   are alternative test systems.  y is device scratch for the substitution
   step (n doubles, allocated by the host). */
__global__
void crout(double *y) {
    /* Coefficient matrix. */
    double A[][n] = {
        {0.31, 0.14, 0.30, 0.27},
        {0.26, 0.32, 0.18, 0.24},
        {0.61, 0.22, 0.20, 0.31},
        {0.40, 0.34, 0.36, 0.17},
        // {0.7071, 0, 1, 0, 0.5, 0, 0, 0, 0},
        // {0, 1, 0, 0, 0, -1, 0, 0, 0},
        // {0, 0, -1, 0, 0, 0, 0, 0, 0},
        // {0, 0, 0, 1, 0, 0, 0, 0, -0.7071},
        // {0.7071, 0, 0, -1, -0.8660, 0, 0, 0, 0},
        // {0, 0, 0, 0, 0, 0, 1, 0, 0.7071},
        // {0, 0, 0, 0, -0.5, 0, -1, 0, 0},
        // {0, 0, 0, 0, 0.8660, 1, 0, -1, 0},
        // {0, 0, 0, 0, 0, 0, 0, 0, 0.7071},
        // { 0.866, 0, -0.5, 0, 0, 0},
        // { 0, 1, 0.5, 0, 0, 0},
        // { 0.5, 0, 0.866, 0, 0, 0},
        // {-0.866, -1, 0, -1, 0, 0},
        // { -0.5, 0, 0, 0, -1, 0},
        // { 0, 0, -0.866, 0, 0, -1},
    };
    /* Right-hand side. */
    double b[n] = {
        1.02,
        1.00,
        1.34,
        1.27,
        // -1000,
        // 0,
        // 0,
        // 0,
        // 0,
        // 500,
        // -500,
        // 0,
        // 0,
        // 0,
        // 0,
        // -1000,
        // 0,
        // 0,
        // 0,
    };
    /* LU factor storage (the two rows beyond n=4 are unused padding). */
    double D[][n] = {
        {0, 0, 0, 0},
        {0, 0, 0, 0},
        {0, 0, 0, 0},
        {0, 0, 0, 0},
        {0, 0, 0, 0},
        {0, 0, 0, 0},
    };
    /* Solution vector. */
    double x[n] = {
        0,
        0,
        0,
        0,
    };
    dekomposisi(A, D);
    print_LU(D);
    sulih(D, b, x, y);
    print_x(x);
}
int main(int argc, char *argv[] ) {
    /* Device scratch vector used by the substitution step inside crout. */
    double *y;
    cudaMalloc(&y, n*sizeof(double));
    crout<<<1, 1>>>(y);
    cudaDeviceSynchronize();
    cudaFree(y);   /* fix: the device allocation was leaked before */
    return 0;
}
|
21,861 | typedef double svm_precision;
#define thread_group_size 64
/* Parameter block for the SVM kernels (laid out like a GPU constant
   buffer).  Field meanings below are inferred from the names —
   NOTE(review): confirm against the kernel code that consumes this. */
struct constantBuffer{
    svm_precision cb_kernelParam1;   // kernel-function parameter 1 — presumably e.g. gamma; verify
    svm_precision cb_kernelParam2;   // kernel-function parameter 2
    unsigned int cb_instanceLength;  // values per instance — TODO confirm
    unsigned int cb_instanceCount;   // number of instances
    unsigned int cb_classIndex;      // index of the class/label column — TODO confirm
    // Run flags
    unsigned int cb_kernel;          // kernel-type selector
    svm_precision cb_param1;         // generic run parameter 1
    svm_precision cb_param2;         // generic run parameter 2
    int cb_ind1;                     // generic index operand 1
    int cb_ind2;                     // generic index operand 2
};
/* Triple of indices used during evaluation.
   NOTE(review): the semantics of ind1..ind3 are not visible here —
   confirm against the code that fills this struct. */
struct evalStruct{
    int ind1;
    int ind2;
    int ind3;
}; |
21,862 | #include <stdio.h>
/* Element-wise vector add c = a + b with a bounds guard; threads whose
   index is at or past num do nothing. */
__global__ void add(int *a, int *b, int *c, int num)
{
    int idx = threadIdx.x;
    if (idx >= num)
        return;
    c[idx] = b[idx] + a[idx];
}
int main(int argc, char const *argv[])
{
    /* Small vector-add demo: a[i] = i, b[i] = i*i, compute c on the GPU
       and print the sums. */
    const int num = 10;
    int a[num], b[num], c[num];
    int *a_gpu, *b_gpu, *c_gpu;
    for (auto i = 0; i < num; i++)
    {
        a[i] = i;
        b[i] = i * i;
    }
    cudaMalloc((void **)&a_gpu, sizeof(a));
    cudaMalloc((void **)&b_gpu, sizeof(b));
    cudaMalloc((void **)&c_gpu, sizeof(c));
    // copy data
    cudaMemcpy(a_gpu, a, sizeof(a), cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, sizeof(b), cudaMemcpyHostToDevice);
    // one block, one thread per element
    add<<<1, num>>>(a_gpu, b_gpu, c_gpu, num);
    cudaMemcpy(c, c_gpu, sizeof(c), cudaMemcpyDeviceToHost);  // blocking: waits for the kernel
    // viz
    for (size_t i = 0; i < num; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    /* Fix: release device memory (it was leaked before). */
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(c_gpu);
    return 0;
}
|
21,863 | #include "includes.h"
/* One comparison step of a parallel in-place max "reduction" over c[0..size):
   each thread keeps the larger of its element and a partner element.
   NOTE(review): for even size the partner is position+size/2 (fold in half);
   for odd size every thread instead compares against the last element
   c[size-1] — this looks like a tail-handling special case, and the host
   presumably calls this kernel repeatedly with a shrinking `size`.
   Confirm against the host loop; there is also no guard against a thread
   racing with its partner's own update. */
__global__ void gpu_reduce(int *c, int size)
{
    /* Flatten the 2D block/thread coordinates into one linear position. */
    int IDX_Thread = threadIdx.x;
    int IDY_Thread = threadIdx.y;
    int IDX_block = blockIdx.x;
    int IDY_block = blockIdx.y;
    int shapeGrid_X = gridDim.x;
    int threads_per_block = blockDim.x * blockDim.y;
    int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
    if(position<size){
        if(size%2 != 0)
        {
            /* Odd size: compare against the unpaired tail element. */
            if(c[position]<c[size-1])
            {
                c[position]=c[size-1];
            }
        }else{
            /* Even size: fold the upper half onto the lower half. */
            if(c[position]<c[position+size/2])
            {
                c[position]=c[position+size/2];
            }
        }
    }
} |
21,864 | #include <iostream>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <random>
#include <vector>
#include <chrono>
#include <deque>
#include <algorithm>
#include <iterator>
#include <set>
#define BLOCK_SIZE 1024
struct bstree {
int *left_child;
int *right_child;
int *parent;
bool *flag;
};
/* One round of parallel BST construction.  Every node i that is still a
   detached leaf (no children recorded, not its own parent) walks one level
   down from its current candidate parent y toward its final position, then
   tries to claim the free child slot there.  flag[0] is set whenever any
   node is still moving, so the host keeps iterating rounds.
   NOTE(review): the child-slot writes are plain stores, so two nodes can
   race for the same slot within one round; the iterate-until-stable host
   loop appears to resolve losers on a later round — verify with
   compute-sanitizer racecheck. */
__global__ void populate_child_parent(float *arr, int *i_left_child, int *i_right_child, int *i_parent, int *o_left_child, int *o_right_child, int *o_parent, bool *flag, const int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        int y = i_parent[i];   // current candidate parent of node i
        if (i_left_child[i] == -1 && i_right_child[i] == -1 && i != y) {
            flag[0] = true;    // at least one node moved this round
            int x, p;
            /* Step one level toward the insertion point: follow y's child
               on the side node i belongs to, or stay at y if that side is
               free. */
            if (arr[i] <= arr[y]) {
                p = i_left_child[y];
                x = (p != -1) ? p:y;
                o_parent[i] = x;
            }
            else {
                p = i_right_child[y];
                x = (p != -1) ? p:y;
                o_parent[i] = x;
            }
            /* Try to claim the free child slot of x (no-op if taken). */
            if (i != x) {
                if (arr[i] <= arr[x]) {
                    if (o_left_child[x] == -1) {
                        o_left_child[x] = i;
                    }
                }
                else {
                    if (o_right_child[x] == -1) {
                        o_right_child[x] = i;
                    }
                }
            }
        }
    }
}
/* Copy n ints from in_arr to out_arr, one element per thread. */
__global__ void copy_arr(int *in_arr, int *out_arr, const int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        out_arr[idx] = in_arr[idx];
}
/* Copy n ints between device buffers via the copy_arr kernel on a private
   stream, blocking until the copy has finished. */
void copy_array(int *arr1, int *arr2, const int n) {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    copy_arr<<<(n + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE, 0, stream>>>(arr1, arr2, n);
    /* Fix: wait only on this stream instead of cudaDeviceSynchronize(),
       so unrelated streams are not stalled. */
    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);
}
/* Fill arr[0..n) with uniform random floats drawn from [min_val, max_val).
   The generator is process-wide (static) and seeded from the system's
   random device, so results differ between runs. */
void random_vector(float *arr, const int n, const float min_val=0.0, const float max_val=1000.0) {
    static std::random_device rd;
    static std::mt19937 mte(rd());
    std::uniform_real_distribution<float> dist(min_val, max_val);
    std::generate(arr, arr + n, [&]() { return dist(mte); });
}
/* One host-driven construction round: copy the current tree state g into
   the scratch tree g1, clear g1's progress flag, then run one kernel pass
   that lets every detached node advance one level.  Returns g1, the
   updated state. */
bstree construct_binary_tree(float *arr, bstree g, bstree g1, const int n) {
    copy_array(g.left_child, g1.left_child, n);
    copy_array(g.right_child, g1.right_child, n);
    copy_array(g.parent, g1.parent, n);
    g1.flag[0] = false;   // kernel sets this if any node is still moving
    populate_child_parent<<<(n + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE>>>(arr, g.left_child, g.right_child, g.parent, g1.left_child, g1.right_child, g1.parent, g1.flag, n);
    cudaDeviceSynchronize();   // host reads flag/children next
    return g1;
}
/* Repeat construction rounds until a pass completes with no node moving
   (the kernel leaves the flag false).  root_index is unused here; the
   root is encoded in the parent array by the caller. */
bstree bs_tree(float *arr, int root_index, bstree g, bstree g1, const int n) {
    g.flag[0] = true;
    do {
        g = construct_binary_tree(arr, g, g1, n);
    } while (g.flag[0]);
    return g;
}
/* Iterative in-order traversal of the tree encoded by left_child /
   right_child arrays (-1 = no child), rooted at root_index.  Returns a
   heap-allocated array of the n node values in order; the caller owns it
   and must delete[] it. */
float *traversal(float *arr, int *left_child, int *right_child, int root_index, const int n) {
    int *stack = new int[n];
    float *out = new float[n];
    stack[0] = root_index;
    int top = 1;        /* current stack depth */
    int written = 0;    /* next free slot in out */
    std::set<int> visited;
    while (top > 0) {
        int node = stack[top-1];
        if (left_child[node] != -1 && visited.find(left_child[node]) == visited.end()) {
            stack[top++] = left_child[node];   /* descend left first */
        }
        else {
            if (visited.find(node) == visited.end()) {
                out[written++] = arr[node];    /* emit node between its subtrees */
                visited.insert(node);
            }
            if (right_child[node] != -1 && visited.find(right_child[node]) == visited.end()) {
                stack[top++] = right_child[node];
            }
            else {
                top -= 1;                      /* both sides done: pop */
            }
        }
    }
    delete[] stack;   /* fix: the scratch stack was leaked before */
    return out;
}
/* Build a BST over arr on the GPU, rooted at a random node, print the
   construction time in ms, then return the in-order traversal (which
   should equal arr sorted).  g/g1 are ping-pong tree buffers in managed
   memory; the caller owns the returned array. */
float *inorder_traversal(float *arr, bstree g, bstree g1, const int n) {
    int root_index = rand() % static_cast<int>(n);
    std::fill(g.parent, g.parent+n, root_index);   // every node starts as a child of the root
    auto t1 = std::chrono::high_resolution_clock::now();
    g = bs_tree(arr, root_index, g, g1, n);
    auto t2 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
    std::cout << duration << std::endl;            // tree-construction time (ms)
    return traversal(arr, g.left_child, g.right_child, root_index, n);
}
/* Sort arr in place and report whether it then matches pred_arr
   element-for-element (exact float equality). */
bool check_correctness(float *arr, float *pred_arr, const int n) {
    std::sort(arr, arr+n);
    return std::equal(arr, arr+n, pred_arr);
}
int main(void) {
    /* Build a BST over 2^25 random floats on the GPU, traverse it in
       order, and verify the traversal equals the sorted input.  Prints
       three timings (ms): construction (inside inorder_traversal),
       construction+traversal, and verification. */
    int n = 1 << 25;
    float *arr, *temp;
    cudaMallocManaged(&arr, n*sizeof(float));
    random_vector(arr, n, 0, 10000);
    temp = new float[n];
    std::copy(arr, arr+n, temp);   /* host copy kept for verification */
    bstree g, g1;
    cudaMallocManaged(&g.left_child, n*sizeof(int));
    cudaMallocManaged(&g.right_child, n*sizeof(int));
    cudaMallocManaged(&g.parent, n*sizeof(int));
    cudaMallocManaged(&g.flag, sizeof(bool));
    cudaMallocManaged(&g1.left_child, n*sizeof(int));
    cudaMallocManaged(&g1.right_child, n*sizeof(int));
    cudaMallocManaged(&g1.parent, n*sizeof(int));
    cudaMallocManaged(&g1.flag, sizeof(bool));
    std::fill(g.left_child, g.left_child+n, -1);   /* -1 = no child */
    std::fill(g.right_child, g.right_child+n, -1);
    auto t1 = std::chrono::high_resolution_clock::now();
    float *pred = inorder_traversal(arr, g, g1, n);
    auto t2 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
    std::cout << duration << std::endl;
    t1 = std::chrono::high_resolution_clock::now();
    std::cout << check_correctness(temp, pred, n) << std::endl;
    t2 = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
    std::cout << duration << std::endl;
    /* Fix: release the host buffers (temp and the traversal result were
       leaked before). */
    delete[] temp;
    delete[] pred;
    cudaFree(arr);
    cudaFree(g.left_child);
    cudaFree(g.right_child);
    cudaFree(g.parent);
    cudaFree(g.flag);
    cudaFree(g1.left_child);
    cudaFree(g1.right_child);
    cudaFree(g1.parent);
    cudaFree(g1.flag);
    return 0;
}
|
21,865 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (file banner says "Do not
   modify"): the arithmetic is intentionally arbitrary; it accumulates a
   value through mixed operations and prints it so outputs can be compared
   across compilers/optimization levels.  Comments only below. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float* var_4,float* var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24) {
    float tmp_1 = -1.7407E-37f;
    comp = tmp_1 * (+1.3653E-44f + (+0.0f - var_2 + (-1.3795E-44f * -0.0f / var_3)));
    for (int i=0; i < var_1; ++i) {
        var_4[i] = (var_6 / var_7 - (-1.1248E-42f / +1.6267E35f + +1.8051E2f));
        var_5[i] = +1.0201E36f;
        comp += var_5[i] + var_4[i] - +0.0f + logf(var_8 + (-1.3514E-17f + +1.2480E28f));
        comp += var_9 - (-1.0346E9f - var_10 / +1.1852E-37f + (var_11 * -0.0f));
    }
    if (comp > var_12 * ldexpf(+0.0f, 2)) {
        float tmp_2 = +1.9453E-36f + (var_13 - (-0.0f - +1.8371E-1f / sinf(var_14 + sqrtf(+1.5774E-36f + var_15 / (+1.9933E1f * (var_16 - (+1.0324E-44f * -0.0f)))))));
        comp = tmp_2 * (+1.5923E-27f * +1.7855E19f);
    }
    if (comp == (-1.0437E34f / +1.6834E-37f + (var_17 - var_18))) {
        float tmp_3 = -1.8511E-36f;
        comp = tmp_3 + acosf(-0.0f);
        comp += (var_19 * var_20 + powf(atan2f(log10f((var_21 + var_22)), +1.7069E27f / ceilf((var_23 - (+1.1874E35f / +0.0f / (-1.0745E-43f / -1.6644E-13f))))), acosf(-0.0f * -1.1551E-37f - var_24)));
    }
    /* Result reported with full float precision for comparison. */
    printf("%.17g\n", comp);
}
/* Allocate an array of `count` floats (default 10, preserving the original
   behaviour) with every element set to v.  Returns NULL if the allocation
   fails; the caller owns the buffer and must free() it. */
float* initPointer(float v, int count = 10) {
    float *ret = (float*) malloc(sizeof(float) * count);
    if (ret == NULL)            /* fix: malloc result was used unchecked */
        return NULL;
    for (int i = 0; i < count; ++i)
        ret[i] = v;
    return ret;
}
/* Auto-generated driver: reads exactly 25 numeric command-line arguments
   (argv[1]..argv[25]) and forwards them to the compute kernel.
   NOTE(review): argc is never validated — missing arguments dereference a
   null argv entry; the generating harness always supplies all 25. */
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float* tmp_5 = initPointer( atof(argv[5]) );   /* host pointers passed to the kernel —
                                                      NOTE(review): not device memory; harness artifact */
    float* tmp_6 = initPointer( atof(argv[6]) );
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25);
    cudaDeviceSynchronize();   /* flush device printf before exit */
    return 0;
}
|
21,866 | #include "includes.h"
/* Atomically add val to the int pointed to by device_var. */
__device__ void add_gpu(int *device_var, int val) {
    atomicAdd(device_var, val);
}
/* Add val to one element of a device array.
   NOTE(review): this is a plain read-modify-write — if multiple threads in
   the grid target the same device_idx the update races.  The commented-out
   atomicAdd suggests the non-atomic form was deliberate; confirm the
   launch guarantees unique indices. */
__global__ void add_gpu(int *device_arr, int device_idx, int val) {
    device_arr[device_idx] += val;
    //atomicAdd(&(device_arr[*device_idx]), val);
} |
21,867 | #include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
/* Read the truth value of variable v (sign ignored) from the packed
   assignment: variables 0..30 live in bits of `id`, variables >= 31 in
   bits of `ie` (offset by 31). */
__device__ bool getval(int v, ull id, ull ie){
    int var = (v < 0) ? -v : v;
    if (var <= 30)
        return ((id >> var) & 1llu) != 0;
    return ((ie >> (var - 31)) & 1llu) != 0;
}
/* Evaluate the 3-CNF formula of n literals (three per clause, negative
   literal = negated variable) under the assignment packed into (id, ie).
   True iff every clause has at least one satisfied literal. */
__device__ bool test(int n, int* raw, ull id, ull ie){
    bool satisfied = true;
    for (int c = 0; c < n; c += 3){
        bool clause = false;
        for (int k = 0; k < 3; k++)
            clause |= (getval(raw[c+k], id, ie) ^ (raw[c+k] < 0));
        satisfied &= clause;
    }
    return satisfied;
}
/* Expand the packed satisfying assignment into res[1..m]. */
__device__ void fillres(int m, bool* res, ull id, ull ie){
    for (int v = 1; v <= m; v++)
        res[v] = getval(v, id, ie);
}
/* Brute-force SAT search.  Each thread owns one setting of the low
   variables (its global id, shifted left one bit so variable 0 is unused)
   and iterates over all settings of the high variables (>= 31).  The first
   thread to find a satisfying assignment claims the flag with atomicExch
   and writes the expanded result; the others poll the flag occasionally
   (every 256 iterations, staggered by thread id) and bail out early. */
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
    ull myid = blockIdx.x * 1024llu + threadIdx.x;
    ull mxstate = (1llu<<m) - 1;   // largest meaningful assignment mask
    if (myid > mxstate) return;    // surplus threads have no work
    ull end = 1;
    if (m-30 > 0) end <<= m-30;    // number of high-variable settings to scan
    for (ull i = 0; i < end; i ++){
        if (test(n, raw, myid<<1, i)){
            if (!atomicExch(flag, 1))   // only the first winner publishes
                fillres(m, res, myid<<1, i);
            return;
        }
        if ((i & 0xff) == (myid & 0xff) && *flag)   // staggered early-exit poll
            return;
    }
}
/* Read a 3-SAT instance from stdin — clause count n, variable count m,
   then 3*n signed literals — launch the brute-force kernel, and print the
   value of each variable if a satisfying assignment was found. */
int main (){
    int *rawd, *raw, *flag;
    bool *resd, *res;
    int n, m, mflag = 0;
    scanf("%d%d", &n,&m);
    n*=3;   /* total literal count: three per clause */
    raw = (int*)malloc(sizeof(int)*n);
    res = (bool*)malloc(m+1);   /* res[0] unused; variables are 1-based */
    for (int i=0;i<n;i++)
        scanf("%d", raw+i);
    cudaMalloc((void**)&rawd, sizeof(int)*n);
    cudaMalloc((void**)&resd, m+1);
    cudaMalloc((void**)&flag, sizeof(int));
    cudaMemcpy(rawd, raw, sizeof(int)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(flag, &mflag, sizeof(int), cudaMemcpyHostToDevice);
    bf<<<1048576,1024>>>(n, m, rawd, resd, flag);
    /* Blocking copy doubles as kernel synchronization. */
    cudaMemcpy(&mflag, flag, sizeof(int), cudaMemcpyDeviceToHost);
    if (mflag){
        cudaMemcpy(res, resd, m+1, cudaMemcpyDeviceToHost);
        for (int i = 1; i <= m; i++)
            printf("%d := %s\n", i, (res[i]?"true":"false"));
    }
    else printf("No satisfy!\n");
    cudaFree(rawd);
    cudaFree(resd);
    cudaFree(flag);
    free(raw);
    free(res);
}
|
21,868 | #include <cuda.h>
#include <stdio.h>
/* Gather input_len weights into `input`, translating logical indices
   (start+i) through a page table: pages below num_of_weight_page come from
   the shared weight pool `addr`, the rest from `exclusive_weight`.
   Grid-stride loop, so any launch configuration is valid.
   NOTE(review): the `end` parameter is never read — iteration is bounded
   by input_len. */
__global__ void GetWeightKernel(float *input, int input_len, float *addr,
    float *exclusive_weight, int num_of_exclusive_weight,
    int *page_table_addr, int page_size, int num_of_weight_page, int start, int end)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < input_len; i += stride) {
        const int logical = start + i;
        const int page = page_table_addr[logical / page_size];
        const int offset = logical % page_size;
        input[i] = (page < num_of_weight_page)
            ? addr[page * page_size + offset]
            : exclusive_weight[(page - num_of_weight_page) * page_size + offset];
    }
}
extern "C" {
/* Copy num_of_exclusive_weight floats from the host buffer into a fresh
   device allocation and return the device pointer.  The caller releases
   it with FreeExclusiveWeight. */
float *LoadExclusiveWeight(float *exclusive_weight_page, int num_of_exclusive_weight)
{
    const size_t bytes = sizeof(float) * num_of_exclusive_weight;
    float *d_weights;
    cudaMalloc(&d_weights, bytes);
    cudaMemcpy(d_weights, exclusive_weight_page, bytes, cudaMemcpyHostToDevice);
    return d_weights;
}
/* Release a device buffer previously returned by LoadExclusiveWeight. */
void FreeExclusiveWeight(float *exclusive_weight)
{
    cudaFree(exclusive_weight);
}
/* Host wrapper: launch GetWeightKernel with a fixed 32x256 configuration
   (grid-stride loop in the kernel covers any input_len) and block until
   it completes.
   NOTE(review): `end` is forwarded but never read by the kernel — confirm
   whether it can be retired from this interface. */
void GetWeightKernelLauncher(float *input, int input_len,
    float *exclusive_weight, int num_of_exclusive_weight,
    float* addr,
    int* page_table_addr, int page_size, int num_of_weight_page, int start, int end)
{
    GetWeightKernel<<<32, 256>>>(input, input_len,
        addr, exclusive_weight, num_of_exclusive_weight,
        page_table_addr, page_size, num_of_weight_page, start, end);
    cudaDeviceSynchronize();
}
|
21,869 | #include "includes.h"
/* Element-wise matrix subtraction res = a - b for an n x n matrix, staged
   through dynamically-sized shared memory (launch with 3*n*n*sizeof(int)
   of dynamic shared memory).
   NOTE(review): `tid` is a GLOBAL index used to address shared memory, so
   the staging is only coherent for a single-block launch — confirm with
   callers. */
__global__ void calculateMatrixFormulaSharedDynamic(int *a, int *b, int *res, int n)
{
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    if (tidx >= n || tidy >= n) {
        return;
    }
    int tid = tidx * n + tidy;
    extern __shared__ int arrays[];
    int *s_a = arrays;
    /* Fix: the original partitioned the shared buffer with an undeclared
       identifier `size`; the matrix dimension parameter is `n`. */
    int *s_b = &arrays[n * n];
    int *s_res = &s_b[n * n];
    s_a[tid] = a[tid];
    s_b[tid] = b[tid];
    s_res[tid] = s_a[tid] - s_b[tid];
    res[tid] = s_res[tid];
}
21,870 | // Reference Reduction scan - Author: Jeiru Hu
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#define BLOCK_SIZE 1024
/* Final warp-level stage of the block sum reduction: folds
   s_in[threadId+32..+1] into s_in[threadId] without __syncthreads(),
   relying on volatile shared-memory accesses and implicit warp lockstep.
   The caller guarantees threadId < 32.
   NOTE(review): implicit warp-synchronous code is unsafe under independent
   thread scheduling (Volta/SM70 and newer) — prefer __syncwarp() between
   steps or a __shfl_down_sync-based reduction there. */
__device__ void warpreduce(volatile float *s_in, int threadId)
{
    s_in[threadId]+=s_in[threadId+32];   /* all 32 callers execute this step */
    if(threadId<16)s_in[threadId]+=s_in[threadId+16];
    if(threadId<8)s_in[threadId]+=s_in[threadId+8];
    if(threadId<4)s_in[threadId]+=s_in[threadId+4];
    if(threadId<2)s_in[threadId]+=s_in[threadId+2];
    if(threadId<1)s_in[threadId]+=s_in[threadId+1];
}
/* Per-block sum reduction: block b sums up to BLOCK_SIZE elements of
   g_data starting at b*BLOCK_SIZE into d_out[b].  n is the number of valid
   elements in this block's tile (callers pass BLOCK_SIZE for full tiles);
   lanes past n contribute 0.  Must be launched with BLOCK_SIZE threads. */
__global__ void reduction(float *g_data, float *d_out, int n)
{
    int blockId = blockIdx.x;
    int threadId = threadIdx.x;
    //int bSize = blockDim.x;
    __shared__ float s_in[1024];
    int startSize = BLOCK_SIZE;
    /* Stage this block's tile into shared memory, zero-padding the tail. */
    if(threadId < n){
        s_in[threadId]=g_data[blockId*BLOCK_SIZE + threadId];
    }
    if(threadId >= n) {
        s_in[threadId] = 0.0;
    }
    //synchronize the threads to make sure all the data is loaded
    __syncthreads();
    /* Tree reduction down to 64 elements; the last warp finishes below. */
    for(unsigned int i=startSize/2;i>32;i>>=1){
        //add the second half of the data to the first half
        if(threadId < i){
            s_in[threadId] += s_in[threadId+i];
        }
        //synchronize the threads to make sure all the caculation is made
        __syncthreads();
    }
    if(threadId<32)
        warpreduce(s_in,threadId);
    //copy the result back
    if(threadId==0){
        d_out[blockId] = s_in[0];
    }
}
/* Sum the `num` floats in device buffer d_in and return the total.
   Runs up to three rounds of the block-reduction kernel, enough for
   num <= BLOCK_SIZE^3 elements.
   Fixes: the result is now returned via a stack variable instead of a
   leaked malloc, and d_out is released on the single-block early-return
   path (it was leaked there before). */
float reductionOnDevice(float *d_in, int num) {
    int blockx = (num + BLOCK_SIZE - 1)/BLOCK_SIZE;
    dim3 dimGrid(blockx, 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    float *d_out;
    assert(cudaSuccess == cudaMalloc(&d_out, blockx*sizeof(float)));
    float result = 0.0f;
    /* Level 1: one partial sum per block.  A lone block gets the true
       element count; full blocks always process BLOCK_SIZE elements. */
    int t = (blockx == 1) ? num : BLOCK_SIZE;
    reduction<<<dimGrid, dimBlock>>>(d_in, d_out, t);
    if (blockx == 1) {
        cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
        cudaFree(d_out);
        return result;
    }
    if (blockx > BLOCK_SIZE) {
        /* Level 2 with several blocks, then level 3 in a single block. */
        int blockxx = (blockx + BLOCK_SIZE - 1)/BLOCK_SIZE;
        dim3 dimGridd(blockxx, 1, 1);
        float *dd_out, *ddd_out;
        cudaMalloc(&dd_out, blockxx*sizeof(float));
        reduction<<<dimGridd, dimBlock>>>(d_out, dd_out, BLOCK_SIZE);
        cudaMalloc(&ddd_out, sizeof(float));
        reduction<<<1, dimBlock>>>(dd_out, ddd_out, blockxx);
        cudaMemcpy(&result, ddd_out, sizeof(float), cudaMemcpyDeviceToHost);
        cudaFree(dd_out);
        cudaFree(ddd_out);
    } else {
        /* The blockx partial sums fit in one block. */
        float *ddd_out;
        cudaMalloc(&ddd_out, sizeof(float));
        reduction<<<1, dimBlock>>>(d_out, ddd_out, blockx);
        cudaMemcpy(&result, ddd_out, sizeof(float), cudaMemcpyDeviceToHost);
        cudaFree(ddd_out);
    }
    cudaFree(d_out);
    return result;
}
/* Read `len` whitespace-separated floats from a text file, subtracting 5
   from each (data conditioning kept from the original).  Wraps back to the
   start of the file when it runs out of values.  Exits with a message if
   the file cannot be opened or memory cannot be allocated. */
float* read_array(const char* filename, int len) {
    float *x = (float*) malloc(len * sizeof(float));
    if (x == NULL) {                       /* fix: malloc was unchecked */
        fprintf(stderr, "read_array: out of memory\n");
        exit(1);
    }
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {                      /* fix: fp was dereferenced unchecked */
        fprintf(stderr, "read_array: cannot open %s\n", filename);
        free(x);
        exit(1);
    }
    for( int i=0; i<len; i++){
        int r=fscanf(fp,"%f",&x[i]);
        if(r == EOF){
            rewind(fp);
            /* fix: re-read after wrapping — the old code left x[i] as
               uninitialized garbage for the element that hit EOF. */
            r = fscanf(fp,"%f",&x[i]);
        }
        x[i]-=5;
    }
    fclose(fp);
    return x;
}
/*
float* read_array(const char* filename, int len) {
float *x = (float*) malloc(len * sizeof(float));
FILE *fp = fopen(filename, "r");
for (int i = 0; i < len; i++) {
fscanf(fp, "%f", &x[i]);
}
fclose(fp);
return x;
}
*/
/* CPU reference sum: accumulate idata[0..len) in double precision and
   store the (narrowed) total in *reference. */
void computeSum( float* reference, float* idata, const unsigned int len)
{
    reference[0] = 0;
    double acc = 0;
    for (unsigned int k = 0; k < len; ++k)
    {
        acc += idata[k];
    }
    *reference = acc;
}
/* Usage: ./problem2 N — read N floats from problem1.inp, sum them on the
   GPU, check against a CPU reference with a loose tolerance, and print N,
   the GPU result, and the inclusive (copy + compute) time in ms. */
int main( int argc, char** argv)
{
    if(argc != 2) {
        fprintf(stderr, "usage: ./problem2 N\n");
        exit(1);
    }
    int num_elements = atoi(argv[1]);
    float* h_data=read_array("problem1.inp",num_elements);
    float reference = 1.0f;
    computeSum(&reference , h_data, num_elements);   /* CPU reference sum */
    int size = num_elements*sizeof(float);
    float *d_in;
    assert(cudaSuccess == cudaMalloc((void**)&d_in, size));
    /* Inclusive timing: covers host-to-device copy plus the reduction. */
    float time;
    cudaEvent_t startIn,stopIn;
    cudaEventCreate(&startIn);
    cudaEventCreate(&stopIn);
    cudaEventRecord(startIn, 0);
    assert(cudaSuccess == cudaMemcpy(d_in, h_data, size, cudaMemcpyHostToDevice));
    //float result = computeOnDevice(h_data, num_elements);
    float result = reductionOnDevice(d_in, num_elements);
    //stop inclusive timing
    cudaEventRecord(stopIn, 0);
    cudaEventSynchronize(stopIn);
    cudaEventElapsedTime(&time, startIn, stopIn);
    cudaEventDestroy(startIn);
    cudaEventDestroy(stopIn);
    /* Accuracy check: loose epsilon because float accumulation order
       differs between host and device. */
    float epsilon = 0.3f;
    unsigned int result_regtest = (abs(result - reference) <= epsilon);
    if(!result_regtest) printf("Test failed device: %f host: %f\n",result,reference);
    //print the outputs
    printf("%d\n%f\n%f\n",num_elements, result, time);
    /* printf("%f,", time); */
    // cleanup memory
    cudaFree(d_in);
    //cudaFree(d_out);
    free( h_data);
    return 0;
}
|
21,871 | #include <cstdlib>
#include <iostream>
#include "cuda_runtime.h"
#include <ctime>
using namespace std;
#define NUM_ELEMENTS 512 * 1000
/* One thread per element: C = A + B.  There is no bounds guard, so the
   launch configuration must supply exactly NUM_ELEMENTS threads. */
__global__ void vecAddDevice(float * A, float * B, float * C) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
int main() {
    /* Vector-add benchmark: fill two random vectors, add them on the GPU
       with one thread per element, and report the elapsed host time. */
    float * hA, * hB, * hC;
    float * dA, * dB, * dC;
    int size = NUM_ELEMENTS * sizeof(float);
    int device;
    char ch;
    cudaDeviceProp deviceProperties;
    hA = new float[NUM_ELEMENTS];
    hB = new float[NUM_ELEMENTS];
    hC = new float[NUM_ELEMENTS];
    // get device properties
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&deviceProperties, device);
    cout << "Multiprocessors count: " << deviceProperties.multiProcessorCount << endl;
    cout << "Warp size: " << deviceProperties.warpSize << endl;
    cout << "Max Threads per Block: " << deviceProperties.maxThreadsPerBlock << endl;
    /* NOTE(review): truncating division — if maxThreadsPerBlock does not
       divide NUM_ELEMENTS the tail is never computed (the kernel has no
       bounds guard, so over-launching is not an option either).  512*1000
       divides evenly for the usual 1024/512 limits. */
    int numBlocks = NUM_ELEMENTS / deviceProperties.maxThreadsPerBlock;
    int threadsPerBlock = deviceProperties.maxThreadsPerBlock;
    // init vectors
    for(int i = 0; i < NUM_ELEMENTS; i++) {
        hA[i] = rand() / (float) RAND_MAX;
        hB[i] = rand() / (float) RAND_MAX;
        hC[i] = 0.0f;
    }
    cout << "Allocate device memory..." << endl;
    cudaMalloc(&dA, size);
    cudaMalloc(&dB, size);
    cudaMalloc(&dC, size);
    // copy data to device memory
    cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
    cout << "Starting kernel..." << endl <<
        "Blocks: " << numBlocks << endl <<
        "Threads per block: " << threadsPerBlock << endl;
    clock_t t1 = clock();
    vecAddDevice<<<numBlocks, threadsPerBlock>>>(dA, dB, dC);
    /* Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
       is the supported equivalent. */
    cudaError_t e = cudaDeviceSynchronize();
    if(e == cudaSuccess)
        cout << "Done." << endl;
    else
        cout << "Error: " << cudaGetErrorString(e) << endl;
    clock_t t2 = clock() - t1;
    double t = ((double)t2 / CLOCKS_PER_SEC * 1000.0);
    cout << "Time elapsed: " << t << " ms" << endl;
    cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
    cout << "Freeing device memory..." << endl;
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    /* Fix: the host buffers were leaked before. */
    delete[] hA;
    delete[] hB;
    delete[] hC;
    cin >> ch;
    return 0;
}
|
21,872 | #include <iostream>
#include <iomanip>
#include <vector>
#include <string>
#include <fstream>
using namespace std;
void Linspace(double*, double, double, int);
void Uniform(double*, double, int);
__global__ void RungeKuttaStepOriginal(double* __restrict__, const double* __restrict__, int);
__global__ void RungeKuttaStepRegisterFriendly(double* __restrict__, const double* __restrict__, int);
__device__ void Lorenz(double* __restrict__, const double* __restrict__, double);
template <class DataType>
DataType* AllocateHostMemory(int);
template <class DataType>
DataType* AllocateDeviceMemory(int);
int main()
{
    // INITIAL SETUP ----------------------------------------------------------------------------------
    /* Ensemble of Lorenz systems: random initial states, parameters swept
       linearly, one RK4 stepper thread per problem.  Prints wall-clock
       simulation time. */
    int NumberOfProblems = 768000;
    int NumberOfThreads = NumberOfProblems;
    int BlockSize = 64;
    cudaSetDevice(1);   /* NOTE(review): assumes a second GPU exists — confirm */
    double* h_State = AllocateHostMemory<double>( 3*NumberOfProblems );
    double* h_Parameters = AllocateHostMemory<double>( NumberOfProblems );
    double* d_State = AllocateDeviceMemory<double>( 3*NumberOfProblems );
    double* d_Parameters = AllocateDeviceMemory<double>( NumberOfProblems );
    /* States are stored structure-of-arrays: x, y, z each in a contiguous
       run of NumberOfProblems doubles. */
    Linspace(h_Parameters, 0.0, 21.0, NumberOfProblems);
    Uniform(h_State, 10.0, NumberOfProblems);
    Uniform(&h_State[ NumberOfProblems ], 10.0, NumberOfProblems);
    Uniform(&h_State[ 2*NumberOfProblems ], 10.0, NumberOfProblems);
    cudaMemcpy(d_State, h_State, 3*sizeof(double)*NumberOfProblems, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Parameters, h_Parameters, sizeof(double)*NumberOfProblems, cudaMemcpyHostToDevice);
    int GridSize = NumberOfThreads/BlockSize + (NumberOfThreads % BlockSize == 0 ? 0:1);
    clock_t SimulationStart;
    clock_t SimulationEnd;
    SimulationStart = clock();
    RungeKuttaStepRegisterFriendly<<<GridSize, BlockSize>>> (d_State, d_Parameters, NumberOfProblems);
    cudaDeviceSynchronize();   /* kernel must finish before stopping the clock */
    SimulationEnd = clock();
    cout << "Simulation time: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl << endl;
    cout << "Simulation time / 1000 RK4 step: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl;
    cout << "Ensemble size: " << NumberOfProblems << endl << endl;
    /* Fix: the final device-to-host copy was cudaMemcpyAsync with no
       subsequent synchronization before main returned, so h_State was not
       guaranteed valid; use the blocking copy instead. */
    cudaMemcpy(h_State, d_State, 3*sizeof(double)*NumberOfProblems, cudaMemcpyDeviceToHost);
    //for (int i=0; i<NumberOfProblems; i++)
    //    cout << "P: " << h_Parameters[i] << " Sates: " << h_State[i] << ", " << h_State[i+NumberOfProblems] << ", " << h_State[i+2*NumberOfProblems] << endl;
}
// AUXILIARY FUNCTION -----------------------------------------------------------------------------
void Linspace(double* x, double B, double E, int N)
{
double Increment;
x[0] = B;
if ( N>1 )
{
x[N-1] = E;
Increment = (E-B)/(N-1);
for (int i=1; i<N-1; i++)
{
x[i] = B + i*Increment;
}
}
}
// Set every one of the N entries of x to the constant value V.
void Uniform(double* x, double V, int N)
{
    for (int k = 0; k < N; ++k)
        x[k] = V;
}
// Right-hand side of the Lorenz system F = f(X), with sigma = 10 and the
// per-problem Rayleigh parameter P.
// NOTE(review): 2.666 truncates the classic beta = 8/3 = 2.6667 — confirm
// the truncation is intentional.
__forceinline__ __device__ void Lorenz(double* __restrict__ F, const double* __restrict__ X, double P)
{
F[0] = 10.0*(X[1] - X[0]);
F[1] = P*X[0] - X[1] - X[0]*X[2];
F[2] = X[0]*X[1] - 2.666 * X[2];
}
// One-thread-per-system fixed-step RK4 integrator for the Lorenz ensemble.
// Performs 1000 steps of size dT = 1e-3, keeping state, parameter and all
// stage vectors in registers ("register friendly"): global memory is only
// touched on entry and exit. d_State is component-major:
// [x(0..N-1), y(0..N-1), z(0..N-1)], so the loads/stores below coalesce.
__global__ void RungeKuttaStepRegisterFriendly(double* __restrict__ d_State, const double* __restrict__ d_Parameters, int N)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N)
{
double X[3];   // current state (x, y, z)
double P;      // per-system Lorenz parameter
double k1[3];  // derivative of the current RK stage (reused for all 4 stages)
double ks[3];  // running weighted sum k1 + 2*k2 + 2*k3 (+ k4 at the end)
double x[3];   // trial state fed to the next stage
double dT = 1e-3;
double dTp2 = 0.5*dT;
double dTp6 = dT * (1.0/6.0);
X[0] = d_State[tid];
X[1] = d_State[tid + N];
X[2] = d_State[tid + 2*N];
P = d_Parameters[tid];
for (int i=0; i<1000; i++)
{
// stage 1: k1 = f(X)
Lorenz(k1, X, P);
#pragma unroll 3
for (int j=0; j<3; j++)
{
x[j] = X[j] + dTp2*k1[j];
ks[j] = k1[j];
}
// stage 2 (midpoint)
Lorenz(k1, x, P);
#pragma unroll 3
for (int j=0; j<3; j++)
{
x[j] = X[j] + dTp2*k1[j];
ks[j] = ks[j]+2.0*k1[j];
}
// stage 3 (midpoint)
Lorenz(k1, x, P);
#pragma unroll 3
for (int j=0; j<3; j++)
{
x[j] = X[j] + dT*k1[j];
ks[j] = ks[j]+2.0*k1[j];
}
// stage 4 (full step), then X += dT/6 * (k1 + 2*k2 + 2*k3 + k4)
Lorenz(k1, x, P);
#pragma unroll 3
for (int j=0; j<3; j++)
X[j] = X[j] + dTp6*( ks[j] + k1[j] );
}
d_State[tid] = X[0];
d_State[tid + N] = X[1];
d_State[tid + 2*N] = X[2];
}
}
// Allocate an uninitialized array of N elements in host memory.
// Aborts the whole process with a message on failure, so the returned
// pointer is always valid; the caller owns it (release with delete[]).
template <class DataType>
DataType* AllocateHostMemory(int N)
{
    DataType* ptr = new (std::nothrow) DataType[N];
    if (ptr == NULL)
    {
        std::cerr << "Failed to allocate Memory on the HOST!\n";
        exit(EXIT_FAILURE);
    }
    return ptr;
}
// Allocate an uninitialized device buffer of N elements via cudaMalloc.
// Aborts the whole process with a message on failure, so the returned
// pointer is always valid; the caller owns it (release with cudaFree).
template <class DataType>
DataType* AllocateDeviceMemory(int N)
{
cudaError_t Error = cudaSuccess;
DataType* MemoryAddressInDevice = NULL;
Error = cudaMalloc((void**)&MemoryAddressInDevice, N * sizeof(DataType));
if (Error != cudaSuccess)
{
std::cerr << "Failed to allocate Memory on the DEVICE!\n";
exit(EXIT_FAILURE);
}
return MemoryAddressInDevice;
} |
21,873 | #include "includes.h"
// Stub kernel: performs no work (placeholder for a saxpy implementation).
__global__ void _dev_saxpy()
{
return;
} |
21,874 | #include <time.h>
#include <cuda.h>
#include <stdio.h>
#define STOP 0
#define START 1
#define BLOCKSIZE 256
extern "C" void chrono (int kind, float *time);
// 5-point (cross) stencil average of the n x n image gpu_a into gpu_b.
// Launched as a 1-D grid; thread l handles pixel (i, j) = (l / n, l % n).
// Border pixels are copied through unchanged; threads past n*n exit early.
__global__ void kconvol (float *gpu_a, float *gpu_b, int n) {
int i, j, l;
// TO DO : evaluate the global 1D index l of the current thread,
// using blockDim, blockIdx and threadIdx.
l = threadIdx.x + blockIdx.x * blockDim.x;
// NOTE(review): bIdx/cDim/TIdx are only used by the commented debug printf.
int bIdx = (blockIdx.x);
int cDim = (blockDim.x);
int TIdx = (threadIdx.x);
// TO DO : evaluate global indices of thread (i,j) from the index l
j = l % n;
i = l / n;
//printf("%d\n", l);
//printf("---------\nl = %d, (i,j) = (%d,%d)\nblockIdx: %d\nblockDim: %d\nthreadIdx: %d\n", l, i, j, bIdx, cDim, TIdx);
if ((i >= n) || (j >= n)) return;
if ((i == 0) || (j == 0) || (i == n-1) || (j == n-1)) {
gpu_b[l] = gpu_a[l]; // edges are untouched
}
else
// TO DO : fill up the MISSING indices below
gpu_b[l]=(1./5.)*(gpu_a[l-n] + gpu_a[l-1] + gpu_a[l] + gpu_a[l+1]+ gpu_a[l+n]);
}
// Run one convolution pass of the n x n image `a` into `b` on the GPU,
// timing only the kernel (allocation and transfers are excluded, as noted
// below). Appends "blocks, time" to timing_plot_1000x1000.out.
extern "C" void gpu_convol (float *a, float *b, int n, int blocks) {
    float *gpu_a;
    float *gpu_b;
    cudaError_t err;
    float time;
    err = cudaMalloc (&gpu_a, n*n*sizeof(float));
    if (err != 0) {
        printf ("Error allocating gpu_a: %s\n", cudaGetErrorString (err));
        exit (1);
    }
    err = cudaMalloc (&gpu_b, n*n*sizeof(float));
    if (err != 0) {
        printf ("Error allocating gpu_b: %s\n", cudaGetErrorString (err));
        exit (1);
    }
    cudaMemcpy (gpu_a, a, n*n*sizeof(float), cudaMemcpyHostToDevice);
    printf("block: %d\n", blocks);
    printf("blocksize: %d\n", BLOCKSIZE);
    // NOTE : the chronometer below does not contemplate overhead of memory
    // allocation and memory transfer.
    chrono (START, &time);
    kconvol <<<blocks,BLOCKSIZE>>> (gpu_a, gpu_b, n);
    err = cudaDeviceSynchronize ();
    chrono (STOP, &time);
    // FIX: check the kernel outcome BEFORE copying and reporting results —
    // the original copied `b` back and printed the timing first.
    if (err != 0) {
        printf ("%s\n", cudaGetErrorString (err));
        exit (1);
    }
    printf ("Convolution took %f sec. on GPU\n", time);
    cudaMemcpy (b, gpu_b, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    FILE *fp = fopen("timing_plot_1000x1000.out", "a");
    if (fp != NULL) {
        fprintf(fp, "%d, %.10g\n", blocks, time);
        // FIX: close the file — the original leaked the FILE handle on every call.
        fclose(fp);
    }
    cudaFree (gpu_a);
    cudaFree (gpu_b);
}
|
21,875 | #include "includes.h"
// Copy row `row_id` of the row-major matrix A (Acols columns) into `out`.
// Supports a 2-D grid of 1-D blocks; threads past Acols do nothing.
__global__ void getRow_IntId_naive(const float * A, int row_id, float * out, int Acols) {
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id < Acols) {
out[id] = A[id + row_id*Acols];
}
} |
21,876 | //pass
//--blockDim=2 --gridDim=1
// Empty kernel: a GPUVerify test case (see the pass/--blockDim pragmas above).
__global__ void foo(char **argument)
{
}
|
21,877 | #include <cuda.h>
#include <stdio.h>
#define N 32
// função executada na GPU
// Element-wise vector addition on the GPU: Dc[i] = Da[i] + Db[i].
// One element per thread; intended for a single block of N threads
// (there is no bounds guard).
__global__ void vecAdd (int *Da, int *Db, int *Dc) {
    const int idx = threadIdx.x;
    Dc[idx] = Da[idx] + Db[idx];
}
// função executada na CPU
// Host-side initialization: a counts down from N, b counts up from 0,
// so a[i] + b[i] == N for every index.
__host__ void initvet(int *host_a, int *host_b) {
    for (int i = 0; i < N; ++i) {
        host_a[i] = N - i;
        host_b[i] = i;
    }
}
// função executada na CPU
// Host-side printing of the three vectors, one table row per index.
__host__ void printvetores (int *a, int *b, int *c) {
    printf("\t [i] \t A\t B\t C\t \n");
    for (int i = 0; i < N; ++i) {
        printf("\t [%d] \t %d\t %d\t %d\n", i, a[i], b[i], c[i]);
    }
}
// função principal executada iniciada em CPU
// Entry point: adds two N-element vectors on the GPU and prints the result.
int main(int argc, char const *argv[]) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = sizeof(int) * N;
    // Host allocations: pinned memory for all three vectors.
    // FIX: `b` was allocated with malloc() but released with cudaFreeHost()
    // below, which is undefined behavior — allocators must match.
    cudaMallocHost((void **) &a, size);
    cudaMallocHost((void **) &b, size);
    cudaMallocHost((void **) &c, size);
    // Fill the input vectors
    initvet(a, b);
    // Device allocations for the three vectors
    cudaMalloc ((void **) &dev_a, size);
    cudaMalloc ((void **) &dev_b, size);
    cudaMalloc ((void **) &dev_c, size);
    // Copy the host-generated inputs to GPU memory
    cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice);
    // Launch the vecAdd kernel: one block of N threads
    vecAdd<<<1,N>>>(dev_a, dev_b, dev_c);
    // Copy the GPU result back to host memory (blocking, so it also
    // synchronizes with the kernel)
    cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost);
    // Print the vectors
    printvetores (a, b, c);
    printf ("\n **** \n Nro Threads = %d\n Nro de Blocos = 1\n", N);
    // Release GPU memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    // Release host memory (all pinned now, matching cudaFreeHost)
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    return 0;
}
|
21,878 | #include "includes.h"
__device__ float step_function(float v) // Sigmoid function::Activation Function
{
    // FIX: use expf and float literals — exp(-v) with double literals promoted
    // the whole computation to double precision on every call.
    return 1.0f / (1.0f + expf(-v));
}
// Apply the sigmoid to every element of `input`, writing to `output`.
// Each thread handles the contiguous slice [N*pos/size, N*(pos+1)/size);
// together the threads cover all N elements exactly once.
__global__ void apply_step_function(float *input, float *output, const int N)
{
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    const int size = blockDim.x * gridDim.x;
    // FIX: compute the slice bounds in 64 bits — `N * pos` overflows a
    // 32-bit int as soon as N * (thread count) exceeds 2^31.
    const int first = (int)((long long)N * pos / size);
    const int last  = (int)((long long)N * (pos + 1) / size);
    for (int idx = first; idx < last; ++idx) {
        output[idx] = step_function(input[idx]);
    }
} |
21,879 | /*
============================================================================
Name : cuda_lock.cu
Author : vuongp
Version :
Copyright : Your copyright notice
Description : CUDA thread wide lock, this code works well at the moment but
there is no guarantee that it will work with all GPU architecture.
============================================================================
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with file/line context; optionally abort the
// process with the error code (default behavior of the gpuErrchk macro).
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n",
            cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
__device__ int mLock = 0;
// Every thread serially acquires the device-wide spin lock `mLock` via
// atomicCAS(0 -> 1), increments the shared counter, and releases the lock
// with atomicExch. Progress relies on the scheduler eventually running the
// lock holder — see the file header disclaimer about architecture guarantees.
__global__ void func(unsigned int *comm) {
bool blocked = true;
while(blocked) {
if(0 == atomicCAS(&mLock, 0, 1)) {
printf("Block Id = %d, Thread Id = %d acquired lock\n", blockIdx.x, threadIdx.x);
*comm += 1;
printf("Block Id = %d, Thread Id = %d, comm = %u\n", blockIdx.x, threadIdx.x, *comm);
atomicExch(&mLock, 0);
printf("Block Id = %d, Thread Id = %d released lock\n", blockIdx.x, threadIdx.x);
blocked = false;
}
}
}
// Entry point: 10 blocks x 64 threads all increment *d_comm under the lock.
int main(void)
{
    unsigned int *d_comm;
    gpuErrchk(cudaMalloc(&d_comm, sizeof(unsigned int)));
    gpuErrchk(cudaMemset(d_comm, 0, sizeof(unsigned int)));
    func<<<10, 64>>>(d_comm);
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaPeekAtLastError());
    // FIX: release the device allocation (the original leaked it).
    gpuErrchk(cudaFree(d_comm));
    return 0;
}
|
21,880 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <stdlib.h>
#include <ctime>
// Draw 5 random integers in [0, 100] on the host, mirror them on the device,
// sort and sum them there with Thrust, and print the average.
int main ()
{
    srand(time(NULL));
    thrust::host_vector<int> hv;
    hv.reserve(5);
    for (int k = 0; k < 5; ++k)
        hv.push_back(rand() % 101);
    thrust::device_vector<int> dv = hv;
    thrust::sort(dv.begin(), dv.end());
    float sum = thrust::reduce(dv.begin(), dv.end());
    std::cout << "Average is " << sum / 5.0f << std::endl;
    return 0;
}
|
21,881 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
// Block-wise sum reduction: each block reduces a 2*BLOCK_SIZE-element segment
// of `in` and writes its partial sum to out[blockIdx.x]. Elements beyond
// `size` are padded with zero, so `size` need not fill the last segment.
// Expects blockDim.x == BLOCK_SIZE.
__global__ void reduction(float *out, float *in, unsigned size)
{
    __shared__ float sdata[2 * BLOCK_SIZE];

    const unsigned tid  = threadIdx.x;
    const unsigned base = 2 * blockIdx.x * blockDim.x;

    // Stage two elements per thread into shared memory, zero-padding the tail.
    sdata[tid]              = (base + tid < size)              ? in[base + tid]              : 0.0f;
    sdata[blockDim.x + tid] = (blockDim.x + base + tid < size) ? in[base + blockDim.x + tid] : 0.0f;

    // Tree reduction with halving stride: active threads stay contiguous,
    // which avoids warp divergence and shared-memory bank conflicts.
    for (unsigned stride = blockDim.x; stride > 0; stride >>= 1)
    {
        __syncthreads();
        if (tid < stride)
            sdata[tid] += sdata[tid + stride];
    }

    if (tid == 0)
        out[blockIdx.x] = sdata[0];
}
|
21,882 | #include<stdio.h>
#include<cuda.h>
#include <cuda_runtime.h>
#define N (1024*1024)
#define M (1000000)
// Iterate the quadratic map x <- x*x - 0.25 M times, starting from i/N.
// One element per thread; assumes the launch covers exactly N threads
// (there is no bounds guard).
__global__ void cudakernel(float *buf)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
buf[i] = 1.0f * i / N;
for(int j = 0; j < M; j++)
buf[i] = buf[i] * buf[i] - 0.25f;
}
// Entry point: fill N floats on the GPU, copy back, and print one element
// chosen interactively by the user.
int main()
{
    // FIX: `float data[N]` (4 MB) previously lived on the automatic stack,
    // risking stack overflow; static storage sidesteps that without malloc.
    static float data[N];
    float *d_data;
    cudaMalloc(&d_data, N * sizeof(float));
    cudakernel<<<N/256, 256>>>(d_data);
    cudaMemcpy(data, d_data, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    int sel;
    printf("Enter an index: ");
    // FIX: check both that scanf parsed a number and that it is a valid
    // index before dereferencing (was an unchecked out-of-bounds read).
    if (scanf("%d", &sel) == 1 && sel >= 0 && sel < N) {
        printf("data[%d] = %f\n", sel, data[sel]);
    }
    return 0;
}
|
21,883 | # include <cuda.h>
# include <cuda_runtime.h>
extern "C"
unsigned char * RGB2HSV(unsigned char * data, int npixels);
// Per-pixel RGB -> HSV conversion on interleaved byte triplets; thread
// posThread handles the pixel at bytes 3*posThread .. 3*posThread+2.
// NOTE(review): S is computed with unsigned integer division (max-min)/max,
// which yields 0 for almost every pixel — a *255 scale looks missing; verify.
// NOTE(review): H is computed in degrees (0..360) but stored in an unsigned
// char, so values above 255 wrap — confirm the intended H encoding.
__global__ void RGB2HSVcuda(unsigned char * dataRGBdev, unsigned char * dataHSVdev, int npixels){
int posThread = blockIdx.x*blockDim.x + threadIdx.x;
// ** Size, just consider the number of pixel, non the total of data,
// ** But in dataRGBdev is all channels data
if(posThread < npixels){
// Get the maximun & minimun value of RGB pixel
unsigned char max = 0;
unsigned char min = 255;
int position = posThread*3;
// Max pos represents 0: R, 1: G, 2: B
int maxpos = 0;
for(int i = 0; i < 3; i++){
int pos = position + i;
if(dataRGBdev[pos] > max){
max = dataRGBdev[pos];
maxpos = i;
}
else if(dataRGBdev[pos] < min)
min = dataRGBdev[pos];
}
// diff clamped to at least 1 to avoid dividing by zero on gray pixels
int diff = (max - min > 0)? max - min: 1;
//set H position
if(maxpos == 0)
dataHSVdev[position] = (dataRGBdev[position + 1] - dataRGBdev[position + 2])*60/diff;
else if(maxpos == 1)
dataHSVdev[position] = (dataRGBdev[position +2] - dataRGBdev[position])*60/diff + 120;
else
dataHSVdev[position] = (dataRGBdev[position] - dataRGBdev[position + 1])*60/diff +240;
// Set S position
dataHSVdev[position +1 ] = (max == 0)? 0 : (max-min)/max;
// Set V Position
dataHSVdev[position + 2] = max;
}
}
// Convert an interleaved RGB byte image of `npixels` pixels to HSV on the GPU.
// Returns a newly allocated interleaved HSV buffer; the caller owns it
// (release with delete[]).
unsigned char * RGB2HSV(unsigned char * data, int npixels){
    const int nbytes = 3 * npixels * (int)sizeof(unsigned char);
    unsigned char * hsv = new unsigned char[npixels * 3];
    unsigned char * hsvDev;
    unsigned char * dataDev;
    cudaMalloc((void**)&hsvDev, nbytes);
    cudaMalloc((void**)&dataDev, nbytes);
    cudaMemcpy(dataDev, data, nbytes, cudaMemcpyHostToDevice);
    const int nThreads = 1024;
    const int nBlocks = (npixels + nThreads - 1) / nThreads;  // ceil-div
    RGB2HSVcuda<<<nBlocks, nThreads>>>(dataDev, hsvDev, npixels);
    cudaMemcpy(hsv, hsvDev, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(dataDev);
    cudaFree(hsvDev);
    return hsv;
}
// Empty entry point: this translation unit is meant to be used through the
// exported RGB2HSV() function, not run directly.
int main(){
} |
21,884 | #include <stdio.h>
// Debug kernel: every thread prints its thread/block indices and the argument.
__global__
void hello(int k) {
printf("my thread number: %d %d\n", threadIdx.x, blockIdx.x);
printf("Argument: %d\n", k);
}
// Entry point: launch 2 blocks of 16 threads; each prints its ids and 5.
int main() {
    hello<<<2,16>>>(5);
    // FIX: surface launch/runtime failures — a kernel launch never reports
    // errors directly, and the original discarded the synchronize status.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
21,885 | #include <stdio.h>
using namespace std;
#define BLOCK_SIZE 16
#define GRID_SIZE 1
// Scale a 3-D volume by 2: res = 2 * img, laid out as `id` slices of
// iRow x iCol elements. One element per thread over a 3-D grid;
// out-of-range threads do nothing.
__global__
void GScale(float* img, float* res, int iRow, int iCol, int id){
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;  // column
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;  // row
    const int iz = blockIdx.z * blockDim.z + threadIdx.z;  // slice
    if (ix >= iCol || iy >= iRow || iz >= id)
        return;
    const int idx = iz * iCol * iRow + iy * iCol + ix;
    res[idx] = 2.0 * img[idx];
}
// Entry point: fill a 6x6x6 volume with 5.0, double it on the GPU, print it.
__host__
int main(void)
{
    int N = 6;
    float *x, *y, *d_x, *d_y;
    x = (float*)malloc(N*N*N*sizeof(float));
    y = (float*)malloc(N*N*N*sizeof(float));
    cudaMalloc((void**)&d_x, N*N*N*sizeof(float));
    cudaMalloc((void**)&d_y, N*N*N*sizeof(float));
    for (int i = 0; i < N*N*N; i++) {
        x[i] = 5.0;
    }
    // FIX: 16x16x16 = 4096 threads per block exceeds the 1024-thread limit,
    // so the original launch always failed and y was printed uninitialized;
    // 8x8x8 = 512 threads is legal, with the grid sized to match.
    dim3 dimBlock(8, 8, 8);
    dim3 dimGrid((N - 1) / 8 + 1, (N - 1) / 8 + 1, (N - 1) / 8 + 1);
    cudaMemcpy(d_x, x, N*N*N*sizeof(float), cudaMemcpyHostToDevice);
    // FIX: execution configuration order is <<<grid, block>>> — the original
    // passed <<<dimBlock, dimGrid>>>.
    GScale<<<dimGrid, dimBlock>>>(d_x, d_y, N, N, N);
    cudaMemcpy(y, d_y, N*N*N*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N*N*N; i++){
        printf("%f - ", y[i]);
    }
    cudaFree(d_x);
    cudaFree(d_y);
    // FIX: release the host buffers (were leaked).
    free(x);
    free(y);
}
|
21,886 | #include "includes.h"
// Reorder a CSR-style adjacency structure according to `ipermutation`,
// relabeling every neighbor with its fine-aggregate (partition) id along the
// way. One thread per output node; threads past `size` exit immediately.
__global__ void permuteInitialAdjacencyKernel(int size, int *adjIndexesIn, int *adjacencyIn, int *permutedAdjIndexesIn, int *permutedAdjacencyIn, int *ipermutation, int *fineAggregate)
{
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= size)
        return;
    // Source run for this node in the original ordering.
    const int srcBegin = adjIndexesIn[ipermutation[node]];
    const int srcEnd   = adjIndexesIn[ipermutation[node] + 1];
    const int dstBegin = permutedAdjIndexesIn[node];
    // Copy the neighbor run, mapping node ids -> aggregate ids as we go.
    const int runLen = srcEnd - srcBegin;
    for (int k = 0; k < runLen; k++)
    {
        permutedAdjacencyIn[dstBegin + k] = fineAggregate[adjacencyIn[srcBegin + k]];
    }
} |
21,887 | /***
This script is an example of usign CUDA Thrust library.
***/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
using namespace std;
// Demonstrate basic Thrust container usage: build a host vector, print it,
// mirror it on the device, grow the device copy, and print that too.
int main(void)
{
    thrust::host_vector<int> v;
    for (int k = 1; k <= 4; ++k)
        v.push_back(k);
    for (int i = 0; i < v.size(); i++)
        cout << "v[" << i << "] == " << v[i] << endl;
    // Copy-construct the device vector from the host one, then append.
    thrust::device_vector<int> v_gpu = v;
    v_gpu.push_back(5);
    for (int i = 0; i < v_gpu.size(); i++)
        std::cout << "v_gpu[" << i << "] == " << v_gpu[i] << std::endl;
    return 0;
}
|
21,888 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include "curand_kernel.h"
#include <assert.h>
// L should be (multiple of (THR_NUMBER - 2) ) + 2
const int THR_NUMBER = 30;
#define SETBLOCKNUM 5
// #define L 122
const int L = (THR_NUMBER -2)* SETBLOCKNUM +2;
// #define MULTISPIN unsigned char
#define MULTISPIN unsigned int
const int MULTISIZE = sizeof(MULTISPIN) *8;
#define T_CYCLE_START 2.26
#define T_CYCLE_END 2.275
#define T_CYCLE_STEP 0.002
#define SINGLETEMP 2.26918531
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
const int AREA = L*L;
const int NTOT = (L-2)*(L-2);
// static const float EXP4_TRESHOLD = exp( -(4.*J) / T);
// static const float EXP8_TRESHOLD = exp( -(8.*J) / T);
#define STEPS_REPEAT 3
#define T_MAX_SIM 100
#define T_MEASURE_WAIT 20
#define T_MEASURE_INTERVAL 10
// print history true/false
#define HISTORY 1
const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 );
const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER );
const dim3 THREADS( THR_NUMBER, THR_NUMBER );
// Running tracker for the mean and (population) standard deviation of a
// fixed number of samples; n is the expected sample count, set at creation.
struct avg_tr {
    float sum;          // running sum of samples
    float sum_squares;  // running sum of squared samples
    int n;              // number of samples the statistics are taken over
};
// Create a zeroed tracker expecting `locn` samples.
struct avg_tr new_avg_tr(int locn) {
    struct avg_tr fresh = { .sum = 0, .sum_squares = 0, .n = locn };
    return fresh;
}
// Accumulate one sample into the tracker.
void update_avg(struct avg_tr * tr_p, float newval) {
    tr_p->sum         += newval;
    tr_p->sum_squares += newval * newval;
}
// Mean over the n samples declared at creation.
float average( struct avg_tr tr) {
    return tr.sum / ((float) tr.n);
}
// Population standard deviation: sqrt(E[x^2] - E[x]^2).
float stdev( struct avg_tr tr) {
    float mean = tr.sum / ((float) tr.n);
    return sqrt( tr.sum_squares / ((float) tr.n) - mean * mean );
}
// float variance( struct avg_tr tr) {
// return ( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
// }
// RNG init kernel: give every thread its own curand state, all seeded from
// `seed` and distinguished by the thread's global id as the sequence number.
__global__ void initRNG(curandState * const rngStates, const int seed) {
// Determine thread ID
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
// Initialise the RNG
curand_init(seed, tid, 0, &rngStates[tid]);
}
// Simple 2-D integer coordinate pair.
struct coords {
int x;
int y;
};
// Map this thread onto global grid coordinates. Each block advances by
// THR_NUMBER-2 cells (not THR_NUMBER): the outer ring of threads in every
// block overlaps the neighboring tile and acts as the halo.
__device__ coords dev_get_thread_coords() {
struct coords thread_coords;
thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ;
thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ;
return thread_coords;
}
// float unitrand(){
// return (float)rand() / (float)RAND_MAX;
// }
// Draw one uniform float from this thread's curand state (curand_uniform),
// writing the advanced state back to global memory afterwards.
__device__ float dev_unitrand( curandState * const rngStates, unsigned int tid ){
curandState localState = rngStates[tid];
float val = curand_uniform(&localState);
rngStates[tid] = localState;
return val;
}
// Single-bit accessors on a multispin word; index has to be less than MULTISIZE.
// Set bit `index` of *multi to 1.
__device__ void dev_set_spin_1 (MULTISPIN * multi, int index) {
*multi |= 1 << index;
}
// Clear bit `index` of *multi.
__device__ void dev_set_spin_0 (MULTISPIN * multi, int index) {
*multi &= ~(1 << index);
}
// Read bit `index` of multi; returns 0 or 1.
__device__ MULTISPIN dev_read_spin(MULTISPIN multi, int index) {
return ( (multi >> index) & 1 );
}
// Each bit of the returned mask encodes the Metropolis RNG outcome for that
// bit-plane: it is 1 iff the draw r satisfies exp8 < r < exp4, i.e. a
// deltaE = +4J flip is accepted but a +8J one is not (thresholds e^(-4J/kT)
// and e^(-8J/kT) are passed from outside).
// `res` starts uninitialized, but the loop writes every one of its bits.
__device__ MULTISPIN generate_exp4_mask(float exp4, float exp8, curandState * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( exp4 > random_number && random_number > exp8) { // this is taken from the article and works. the version below might not but slightly simplifies some things
// if( exp4 > random_number) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
// Each bit of the returned mask is 1 iff that bit-plane's draw r < exp8,
// i.e. even a deltaE = +8J flip is accepted. Every bit of `res` is written.
__device__ MULTISPIN generate_exp8_mask(float exp8, curandState * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( random_number < exp8 ) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
// One multispin word with random bits (each bit-plane gets a random spin).
MULTISPIN init_random_multispin() {
    return (MULTISPIN) rand(); // just spam random bits
}
// Fill the whole L x L grid with random multispin words (hot start).
void init_random_grid(MULTISPIN grid[L*L]) {
    for (int col = 0; col < L; col++) {
        for (int row = 0; row < L; row++) {
            grid[col + row*L] = init_random_multispin();
        }
    }
}
// The all-zero multispin word: the fully ordered T = 0 configuration.
MULTISPIN init_t0_multispin() {
    return (MULTISPIN) 0; // should be all zeros for all sensible multispin types
}
// Fill the whole L x L grid with the ordered T = 0 configuration (cold start).
void init_t0_grid(MULTISPIN grid[L*L]) {
    for (int col = 0; col < L; col++) {
        for (int row = 0; row < L; row++) {
            grid[col + row*L] = init_t0_multispin();
        }
    }
}
// Read the shared-memory tile at offset (xstep, ystep) from (x, y).
// No bounds check — calling this on a tile edge reads out of bounds
// ("can segfault"), so callers must stay on interior threads.
__device__ static inline MULTISPIN dev_shared_grid_step(MULTISPIN shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) {
return shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER];
}
// One multispin Metropolis update of site (x, y) in the shared tile: all
// MULTISIZE bit-planes are updated at once with bitwise logic (see the truth
// table below). Segfaults if applied to an edge spin — must be called only
// on the inner (non-halo) threads of the tile.
__device__ void dev_update_multispin_shared(MULTISPIN grid[THR_NUMBER*THR_NUMBER], int x, int y, float exp4, float exp8, curandState * const rngStates, int tid ) {
MULTISPIN s0 = grid[x+y*THR_NUMBER];
MULTISPIN exp4_mask = generate_exp4_mask(exp4, exp8, rngStates, tid ); // here
MULTISPIN exp8_mask = generate_exp8_mask(exp8, rngStates, tid );
// "energy variables" indicating whether s0 is equal or opposite to each of its 4 neighbours
MULTISPIN i1 = s0 ^ dev_shared_grid_step(grid, x, y, 1, 0);
MULTISPIN i2 = s0 ^ dev_shared_grid_step(grid, x, y, -1, 0);
MULTISPIN i3 = s0 ^ dev_shared_grid_step(grid, x, y, 0, 1);
MULTISPIN i4 = s0 ^ dev_shared_grid_step(grid, x, y, 0, -1);
// bit sums with carry over between the i variables
MULTISPIN j1 = i1 & i2;
MULTISPIN j2 = i1 ^ i2;
MULTISPIN j3 = i3 & i4;
MULTISPIN j4 = i3 ^ i4;
// logic for deciding whether to flip s0 or not
MULTISPIN flip_mask = ( ((j1 | j3) | (~(j1^j3) & (j2&j4)) ) | ((j2 | j4) & exp4_mask ) | exp8_mask );
grid[x+y*THR_NUMBER] = grid[x+y*THR_NUMBER] ^ flip_mask;
// explanation:
// spins | i1234 | deltaE | j1 j2 j3 j4 |
// 1 | 1 | | |
// 101 | 1 1 | -8 | 1 0 1 0 |
// 1 | 1 | | |
//
// 0 | 0 | | |
// 101 | 1 1 | -4 | 0 1 1 0 | (j1 | j3)
// 1 | 1 | | |
//
// 0 | 0 | | 0 0 1 0 |
// 001 | 0 1 | 0 | or |-------------------------
// 1 | 1 | | 0 1 0 1 | ~(j1^j3) & (j2&j4))
//------------------------------------------------------------------
//
// 0 | 0 | | |
// 000 | 0 0 | +4 | | (j2 | j4) & exp4
// 1 | 1 | | |
//------------------------------------------------------------------
//
// 0 | 0 | | |
// 000 | 0 0 | +8 | 0 0 0 0 | exp8
// 0 | 0 | | |
// the first 2 cases are detected by (j1 | j3) and lead to the spin flip regardless of the RNG roll.
// the deltaH = 0 case can result in two different forms for the j's depending on ho the spins are paired.
// the first of these is correctly picked up by (j1 | j3), while the second needs its own expression ~(j1^j3) & (j2&j4))
// in the 4th case, detected by (j2 | j4), the spin is flipped only if the RNG roll is lucky enough (exp4 = 1)
// if we still haven't flipped, we get to the last case. here the spin is flipped only if the RNG roll gives the luckiest result (exp8 = 1).
}
// Host-side dump of bit-plane 0: one character per site ("█" = spin 1,
// space = spin 0), one line per x index.
void multidump_first(MULTISPIN grid[L*L]) {
    // printf("first bit grid (out of %i):\n", MULTISIZE);
    for (int x = 0; x < L; x++) {
        for (int y = 0; y < L; y++) {
            printf(( grid[x+y*L] & 1 ) == 0 ? " " : "█");
        }
        printf("\n");
    }
    printf("\n");
}
// Host-side dump of the first 5 bit-planes, each rendered like
// multidump_first and preceded by a header naming the bit.
void multidump_a_few(MULTISPIN grid[L*L]) {
    for (int k = 0; k < 5; k++) {
        printf("grid on bit %i (out of %i):\n", k, MULTISIZE);
        for (int x = 0; x < L; x++) {
            for (int y = 0; y < L; y++) {
                printf(( grid[x+y*L] & (1 << k) ) == 0 ? " " : "█");
            }
            printf("\n");
        }
        printf("\n");
    }
}
// One fused measure+update step for the whole lattice (called T_MAX_SIM times).
// Each block stages its THR_NUMBER x THR_NUMBER tile (interior + 1-cell halo)
// in shared memory. Every T_MEASURE_INTERVAL steps after T_MEASURE_WAIT, the
// per-block magnetization of each bit-plane is accumulated into
// dev_single_run_avgs with atomics. The update is a two-level checkerboard:
// macro color by block parity, then site color by global-coordinate parity,
// so no two simultaneously-updated spins are neighbors.
// NOTE: the __syncthreads() calls below sit inside branches that depend only
// on blockIdx/ksim, which are uniform across a block, so every thread of a
// block reaches the same barriers.
__global__ void dev_measure_cycle_kernel(MULTISPIN * dev_grid, curandState * const rngStates, float * dev_single_run_avgs, int * dev_partial_res, float exp4, float exp8, int ksim ) {
// setup
struct coords glob_coords = dev_get_thread_coords();
int glob_x = glob_coords.x;
int glob_y = glob_coords.y;
// Determine thread ID (for RNG)
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
__shared__ MULTISPIN shared_grid[ THR_NUMBER*THR_NUMBER ];
shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = dev_grid[(glob_x )+ (glob_y )*L ];
__syncthreads();
// allocate shared memory for measure results
// magnetization
__shared__ int blocksum[ MULTISIZE ];
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
for (int multik=0; multik<MULTISIZE; multik++) {
blocksum[ multik ] = 0;
}
}
__syncthreads();
////////////////////////////////////////////
////// measure
////////////////////////////////////////////
if(ksim > T_MEASURE_WAIT && ksim % T_MEASURE_INTERVAL == 0) {
// this condition does not depend on the thread id in any way
for (int multik=0; multik<MULTISIZE; multik++) {
// magnetization
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
int lspin = (int) dev_read_spin(shared_grid[threadIdx.x + threadIdx.y*THR_NUMBER], multik );
atomicAdd( &(blocksum[ multik ]), lspin ); // change with pointer arithm
}
__syncthreads();
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
// map the 0/1 spin count to a magnetization in [-1, 1] for this block
int blockntot = (THR_NUMBER-2)*(THR_NUMBER-2);
float nval = ((float) ( blocksum[ multik] *2 - blockntot ))/ ( (float) blockntot );
atomicAdd(&(dev_single_run_avgs[multik]), nval);
blocksum[ multik ] = 0;
}
}
}
__syncthreads();
////////////////////////////////////////////
////// update
////////////////////////////////////////////
// macro-checkboards
// macro-white
if( (blockIdx.x + blockIdx.y%2)%2 == 0 ) {
/////////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
// if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
// threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
// }
//////////
}
__syncthreads();
// macro-black
if( (blockIdx.x + blockIdx.y%2)%2 == 1 ) {
//////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
}
// write the updated interior of the tile back to the global grid
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
}
//////////
// __syncthreads();
}
// Host driver: runs STEPS_REPEAT independent simulations from `startgrid`,
// launching the fused update/measure kernel T_MAX_SIM times per run, then
// averages the per-bit-plane magnetizations over runs and writes
// "mean stdev" to `resf`. When HISTORY is set, also dumps bit-plane 0 of
// the last run's final grid.
void parall_measure_cycle(MULTISPIN startgrid[L*L], MULTISPIN * dev_grid, float exp4, float exp8, curandState * const rngStates, FILE *resf) {
float n_measures_per_sim = (float) ((T_MAX_SIM - T_MEASURE_WAIT)/T_MEASURE_INTERVAL);
// space for tracking magnetization
float single_run_avgs[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {single_run_avgs[k] = 0.;}
float * dev_single_run_avgs;
cudaMalloc(&dev_single_run_avgs, MULTISIZE*sizeof(float));
cudaMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyHostToDevice);
// extra space needed by update_magnetization
int partial_res[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {partial_res[k] = 0;}
int * dev_partial_res;
cudaMalloc(&dev_partial_res, MULTISIZE*sizeof(int));
cudaMemcpy(dev_partial_res, &partial_res, MULTISIZE*sizeof(int), cudaMemcpyHostToDevice);
// outer average
struct avg_tr avg_of_runs = new_avg_tr( MULTISIZE * STEPS_REPEAT );
for( int krep=0; krep< STEPS_REPEAT; krep++) {
if (HISTORY) printf("# simulation %i\n", krep+1);
if (HISTORY) printf("# waiting thermalization for the first %i sim steps.\n", T_MEASURE_WAIT);
cudaMemcpy(dev_grid, startgrid, L*L*sizeof(MULTISPIN), cudaMemcpyHostToDevice);
// kernel
for (int ksim=0; ksim<T_MAX_SIM; ksim++) {
dev_measure_cycle_kernel<<<BLOCKS, THREADS>>>(dev_grid, rngStates, dev_single_run_avgs, dev_partial_res, exp4, exp8, ksim );
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("kernel: ERROR: %s\n", cudaGetErrorString(err));
} else printf("kernel: no ERROR: %s\n", cudaGetErrorString(err));
// results
// magnetization
cudaMemcpy(&single_run_avgs, dev_single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyDeviceToHost);
for(int multik=0; multik <MULTISIZE; multik++) {
// normalize: every block added its own per-measure average, so divide by
// (measures per run) * (number of blocks)
float lres = single_run_avgs[multik] / (n_measures_per_sim * BLOCK_NUMBER*BLOCK_NUMBER); // change
if (HISTORY) printf("# average on bit %i\n: %f\n", multik+1, lres);
update_avg(&avg_of_runs, lres);
// reset averages
single_run_avgs[multik] = 0.;
partial_res[multik] = 0;
}
//reset on device
cudaMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_partial_res, & partial_res, MULTISIZE*sizeof(int), cudaMemcpyHostToDevice);
if (HISTORY) printf("# end simulation %i\n", krep+1);
}
// END OUTER REPETITION LOOP
// magn
float l2av = average(avg_of_runs);
float l2stdev = stdev(avg_of_runs);
if (HISTORY) printf("# overall average \n: %f +- %f\n", l2av, l2stdev);
fprintf(resf, "%f ", l2av);
fprintf(resf, "%f\n", l2stdev);
// grid for displaying end-state (of last rep only)
MULTISPIN endgrid[L*L];
cudaMemcpy(endgrid, dev_grid, L*L*sizeof(MULTISPIN), cudaMemcpyDeviceToHost);
if (HISTORY) multidump_first(endgrid);
cudaFree(dev_partial_res);
cudaFree(dev_single_run_avgs);
}
int main() {
    // Precondition of the tiling scheme: the lattice interior must tile
    // exactly into (THR_NUMBER-2)-wide blocks.
    // L should be (multiple of THR_NUMBER -2) + 2
    assert( ((L-2)% (THR_NUMBER-2) )== 0 );

    // CUDA events time the whole program (setup + simulation + teardown).
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    FILE *resf = fopen("results.txt", "w");
    fprintf(resf, "# gpu1\n");
    fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
    fprintf(resf, "# coupling: %f\n# repetitions: %i\n", J, STEPS_REPEAT);
    fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", T_MAX_SIM,T_MEASURE_WAIT, T_MEASURE_INTERVAL, SEED);
    fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
    fprintf(resf, "\n");
    fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");

    // still used for init_random_grid
    srand(SEED);

    // curand init
    // Allocate memory for RNG states (one curandState per thread).
    curandState *d_rngStates = 0;
    cudaMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState));
    // Initialise RNG
    initRNG<<<BLOCKS, THREADS>>>(d_rngStates, SEED);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("initRNG: ERROR: %s\n", cudaGetErrorString(err));
    } else printf("initRNG: no ERROR: %s\n", cudaGetErrorString(err));

    // device grid
    MULTISPIN * dev_grid;
    cudaMalloc(&dev_grid, L*L*sizeof(MULTISPIN));

    // original grid on the cpu
    MULTISPIN startgrid[L*L];
    init_t0_grid(startgrid);
    // multidump_a_few(startgrid);

    // // temp cycle:
    // for( float kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
    //     const float EXP4 = exp( -(4.*J) / kt);
    //     const float EXP8 = exp( -(8.*J) / kt);
    //     fprintf(resf, "%f ", kt);
    //     if (HISTORY) printf("temperature: %f\n", kt);
    //     parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf);
    // }

    // Single-temperature run: precompute the two Boltzmann factors used by
    // the Metropolis acceptance test (energy deltas of 4J and 8J).
    const float EXP4 = exp( -(4.*J) / SINGLETEMP);
    const float EXP8 = exp( -(8.*J) / SINGLETEMP);
    fprintf(resf, "%f ", SINGLETEMP);
    if (HISTORY) printf("temperature: %f\n", SINGLETEMP);
    parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf);

    // BUG FIX: sizeof(...) expressions have type size_t; printing them with
    // %i is undefined behavior on LP64 targets. Use %zu instead.
    printf(" ERROR? rng malloc size: %zu\n", THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState));
    printf(" ERROR? shared memory used: %zu\n", THR_NUMBER*THR_NUMBER*sizeof(MULTISPIN) + BLOCK_NUMBER*BLOCK_NUMBER*MULTISIZE*sizeof(int));

    cudaFree(d_rngStates);
    cudaFree(dev_grid);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float total_time = 0;
    cudaEventElapsedTime(&total_time, start, stop);

    FILE *timef = fopen("time.txt", "w");
    long int total_flips = ((long int)(n_temps))* ((long int)((STEPS_REPEAT))) * ((long int)(T_MAX_SIM)) * ((long int)(NTOT));
    fprintf(timef, "# gpu1\n");
    fprintf(timef, "# total execution time (milliseconds):\n");
    fprintf(timef, "%f\n", total_time);
    fprintf(timef, "# total spin flips performed:\n");
    fprintf(timef, "%li\n", total_flips);
    fprintf(timef, "# average spin flips per millisecond:\n");
    fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
    fclose(timef);

    fclose(resf);
    return 0;
}
|
21,889 | #include "includes.h"
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
#define FSize 256
//void convolution(int *InputImage,int width,int height,int *filter,int filterWidth,,int padding,int *result);
using namespace std;
// Valid ("no padding") 2D convolution: each thread computes one output pixel
// of the feature map by sliding a filterWidth x filterWidth window over
// InputImage. Output size is (height-filterWidth+1) x (width-filterWidth+1).
__global__ void MatrixMultiple(int *InputImage,int width,int height,int *filter,int filterWidth,int *featureMap)
{
    /* get global row col */
    int Row=blockIdx.y*TILE_HEIGHT+threadIdx.y;
    int Col=blockIdx.x*TILE_WIDTH+threadIdx.x;
    int feathreMapwidth=width-filterWidth+1;
    int feathreMapheight=height-filterWidth+1;
    // BUG FIX: the original guard (Row*width+Col < width*height) allowed
    // threads whose filter window runs past the right/bottom image edge,
    // causing out-of-bounds reads of InputImage and out-of-bounds writes of
    // featureMap. Bound by the feature-map dimensions instead.
    if(Row < feathreMapheight && Col < feathreMapwidth)
    {
        int value=0;
        for(int i=0;i<filterWidth;i++)
        {
            for(int j=0;j<filterWidth;j++)
            {
                value+=filter[i*filterWidth+j]* InputImage[(Row+i)*width+Col+j];
            }
        }
        featureMap[feathreMapwidth*Row+Col]=value;
    }
}
21,890 | #include "includes.h"
// Count how many entries of niz[0..n) equal k; the total is accumulated
// into *res with atomicAdd (caller must zero *res before the launch).
__global__ void callOperation(int *niz, int *res, int k, int n)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    // Tail guard: grids rarely divide n evenly.
    if (gid < n && niz[gid] == k) {
        atomicAdd(res, 1);
    }
}
21,891 | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// You can use any other block size you wish.
#define BLOCK_SIZE 512
#define BLOCK_DUB 1024
#define DEFAULT_NUM_ELEMENTS 1024
#define MAX_RAND 2
typedef float REAL;
// Up-sweep (reduce) phase of a work-efficient Blelloch-style scan, performed
// in place on idata: each thread stages two strided elements into shared
// memory, a binary tree reduction runs in shared memory, and the partial
// sums are written back to idata.
// NOTE(review): `odata` is unused by this kernel; results go back into
// `idata`. Also assumes DEFAULT_NUM_ELEMENTS is the true array length and
// that `num` divides it — TODO confirm against prescanArray's call sites.
__global__ void prescan(REAL *odata, REAL *idata, int num)
{
    // volatile: force shared-memory values to be re-read after each barrier
    // instead of being cached in registers.
    volatile __shared__ REAL temp[BLOCK_DUB];
    int ti = threadIdx.x;
    // flatten a (possibly 2D) grid into a linear block id
    int bid = blockIdx.x + blockIdx.y*gridDim.x;
    int index = bid*blockDim.x + ti;
    int ofs = 1;
    // stride between the elements this pass operates on
    int mult = DEFAULT_NUM_ELEMENTS/num;
    int top = mult*(2*index+1);
    if (top < DEFAULT_NUM_ELEMENTS)
    {
        temp[2*ti] = idata[2*index*mult];
        temp[2*ti+1] = idata[top];
    } else {
        // pad out-of-range slots with the additive identity
        temp[2*ti] = 0;
        temp[2*ti+1] = 0;
    }
    // tree reduction: each pass halves the number of active threads; the
    // barrier at the top of each iteration orders writes before reads.
    for (int i = BLOCK_SIZE; i>0; i>>=1)
    {
        __syncthreads();
        if (ti<i)
        {
            int ai = ofs*(2*ti+1)-1;
            int bi = ofs*(2*ti+2)-1;
            temp[bi] += temp[ai];
        }
        ofs <<= 1;
    }
    __syncthreads();
    // write the reduced values back in place
    if (top < DEFAULT_NUM_ELEMENTS)
    {
        idata[2*index*mult] = temp[2*ti];
        idata[top] = temp[2*ti+1];
    }
}
// Down-sweep phase of the Blelloch scan: converts the reduction tree held in
// idata into an exclusive prefix sum, written to odata.
// NOTE(review): this kernel indexes shared memory with `num`-based offsets
// and is launched as <<<1, BLOCK_SIZE>>> below, so it is only valid for
// num <= 2*blockDim.x — verify for large inputs.
__global__ void downsweep(REAL *odata, REAL *idata, int num)
{
    volatile __shared__ REAL tempd[BLOCK_DUB];
    int ti = threadIdx.x;
    int ofs = num;
    // each thread stages two elements into shared memory
    tempd[ti] = idata[ti];
    tempd[ti+blockDim.x] = idata[ti+blockDim.x];
    // exclusive scan: clear the last element (the total) before sweeping down
    if (ti ==0) {
        tempd[num-1] = 0;
    }
    // NOTE(review): there is no __syncthreads() between the staging writes
    // above and the first iteration's reads; the barrier inside the loop
    // runs before any cross-thread read, but confirm thread 0's clear of
    // tempd[num-1] is covered by it.
    for (int j = 1; j<num; j<<=1)
    {
        ofs >>= 1;
        __syncthreads();
        if (ti < j)
        {
            int ai = ofs*(2*ti+1)-1;
            int bi = ofs*(2*ti+2)-1;
            // swap-and-accumulate step of the down-sweep
            REAL temp2 = tempd[ai];
            tempd[ai] = tempd[bi];
            tempd[bi] += temp2;
        }
    }
    __syncthreads();
    odata[ti] = tempd[ti];
    odata[ti+blockDim.x] = tempd[ti+blockDim.x];
}
// **===-------- Modify the body of this function -----------===**
// You may need to make multiple kernel calls.
// Host driver for the scan: repeatedly launches the up-sweep (prescan) until
// the problem collapses to one block, then runs the down-sweep once.
// The grid may be split over y when more than 65535 blocks are needed.
void prescanArray(REAL *outArray, REAL *inArray, int numElements)
{
    //Use kernel to compute the reduction
    int blocksx, blocksy, blocks;
    int threads = BLOCK_SIZE;
    int nestElements = numElements;
    blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
    blocks = blocksx;
    blocksy = 1;
    if (blocksx > 65535) {
        blocksy = (blocksx+65534)/65535;
        blocksx = 65535;
    }
    dim3 dimGrid(blocksx,blocksy);
    while(nestElements > 1)
    {
        // Recursive implementation to compute the reduction
        prescan <<<dimGrid,threads>>> (outArray, inArray, nestElements);
        nestElements = blocks;
        blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
        blocks = blocksx;
        blocksy = 1;
        if (blocksx > 65535) {
            blocksy = (blocksx+65534)/65535;
            blocksx = 65535;
        }
        // BUG FIX: the original declared a NEW `dim3 dimGrid(...)` here,
        // shadowing the outer variable, so subsequent launches reused the
        // stale first-iteration grid. Assign to the existing variable.
        dimGrid = dim3(blocksx, blocksy);
    }
    downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements);
}
// **===-----------------------------------------------------------===**
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const REAL* reference, const REAL* data,
const unsigned int len);
extern "C"
void computeGold( REAL* reference, REAL* idata, const unsigned int len);
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: all work is delegated to the scan test harness.
int main(int argc, char** argv)
{
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Run one scan test: build random input (size from argv[1] or the default),
// compute a CPU reference, run the GPU prescan, and compare with a relative
// tolerance. Prints timings for both paths.
void
runTest( int argc, char** argv)
{
    float device_time;
    float host_time;
    int num_elements = 0; // Must support large, non-power-of-2 arrays

    // BUG FIX: the original malloc'd h_data here (with mem_size computed
    // from num_elements == 0) and then malloc'd it again inside the switch
    // without freeing, leaking the first allocation. Allocate only once
    // num_elements is known.
    unsigned int mem_size = 0;
    REAL* h_data = NULL;

    switch(argc-1)
    {
        case 0:
            num_elements = DEFAULT_NUM_ELEMENTS;
            // allocate host memory to store the input data
            mem_size = sizeof( REAL) * num_elements;
            h_data = (REAL*) malloc( mem_size);
            // initialize the input data on the host
            for( unsigned int i = 0; i < num_elements; ++i)
            {
                h_data[i] = (int)(rand() % MAX_RAND);
            }
            break;
        default:
            num_elements = atoi(argv[1]);
            // allocate host memory to store the input data
            mem_size = sizeof( REAL) * num_elements;
            h_data = (REAL*) malloc( mem_size);
            // initialize the input data on the host
            for( unsigned int i = 0; i < num_elements; ++i)
            {
                h_data[i] = (int)(rand() % MAX_RAND);
            }
            break;
    }

    // CUDA events used to time both the CPU reference and the GPU scan.
    cudaEvent_t time_start;
    cudaEvent_t time_end;
    cudaEventCreate(&time_start);
    cudaEventCreate(&time_end);

    // compute reference solution on the host
    REAL* reference = (REAL*) malloc( mem_size);
    cudaEventRecord(time_start, 0);
    computeGold( reference, h_data, num_elements);
    cudaEventRecord(time_end, 0);
    cudaEventSynchronize(time_end);
    cudaEventElapsedTime(&host_time, time_start, time_end);
    printf("\n\n**===-------------------------------------------------===**\n");
    printf("Processing %d elements...\n", num_elements);
    printf("Host CPU Processing time: %f (ms)\n", host_time);

    // allocate device memory input and output arrays
    REAL* d_idata = NULL;
    REAL* d_odata = NULL;
    cudaMalloc( (void**) &d_idata, mem_size);
    cudaMalloc( (void**) &d_odata, mem_size);

    // copy host memory to device input array
    cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice);
    // initialize all the other device arrays to be safe
    cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice);

    // Run and time the GPU prescan.
    cudaEventRecord(time_start, 0);
    prescanArray(d_odata, d_idata, num_elements);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent.
    cudaDeviceSynchronize();
    cudaEventRecord(time_end, 0);
    cudaEventSynchronize(time_end);
    cudaEventElapsedTime(&device_time, time_start, time_end);
    cudaEventDestroy(time_start);
    cudaEventDestroy(time_end);
    printf("CUDA Processing time: %g (ms)\n", device_time);

    // copy result from device to host
    cudaMemcpy( h_data, d_odata, sizeof(REAL) * num_elements,
                cudaMemcpyDeviceToHost);

    // Check if the result is equivalent to the expected soluion
    unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7);
    printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED");

    // cleanup memory
    free( h_data);
    free( reference);
    cudaFree( d_odata);
    cudaFree( d_idata);
}
// Compare h_data against reference element-wise using a relative error
// bound: |a-b| / max(1, |b|) must be < err. Returns 1 when all elements
// match, otherwise prints the mismatch count and returns 0.
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err) {
    int mismatches = 0;
    for (int k = 0; k < num_elements; k++) {
        REAL delta = fabs(reference[k] - h_data[k]);
        // Scale by the reference magnitude, but never by less than 1.
        REAL scale = 1.f;
        if (scale < fabs(reference[k])) {
            scale = fabs(reference[k]);
        }
        // Negated comparison so NaNs count as mismatches too.
        if (!(delta / scale < err)) {
            mismatches++;
        }
    }
    if (mismatches == 0) {
        return 1;
    }
    printf("Number of difference: %d\n", mismatches);
    return 0;
}
|
21,892 | #include <stdio.h>
#include <stdlib.h>
// Each thread stores its block-local thread id at its own slot of idsOut.
// NOTE: uses threadIdx.x only, so it is meaningful for a single 1D block;
// idsOut must hold at least blockDim.x ints.
__global__
void saveIDs(int *idsOut){
    const int tid = threadIdx.x;
    idsOut[tid] = tid;
}
// __host__ default for all functions
// __device__ can be invoked on GPU side (called on device and runs on device - kernel helpers)
// One Jacobi relaxation sweep over the plate interior; each thread updates
// one interior row. Even iterations read plate1 and write plate2, odd
// iterations do the reverse (ping-pong buffers). maxDiff[row] receives the
// largest per-row change so the host can test convergence.
// NOTE(review): the reference parameters (rows, cols, epsilon, iteration)
// are invoked from main() below by dereferencing DEVICE pointers on the
// host (*drows etc.), which is undefined behavior — these should be plain
// by-value parameters. Left as-is; fixing requires changing the call site
// together with this signature.
__global__
void steady(double *plate1, double *plate2, double *maxDiff, int &rows, int &cols, double &epsilon, int &iteration){
    double diff = 0;
    int globaltid;
    // one thread per interior row (rows-2 of them)
    globaltid = blockIdx.x * blockDim.x + threadIdx.x;
    if (globaltid < rows-2){
        maxDiff[globaltid]=0;
        if (iteration%2==0){
            // even pass: plate1 -> plate2; 4-point stencil average
            for(int i=1;i<cols-1;i++){
                *(plate2 + (globaltid+1)*cols + i) = 0.25 * (*(plate1 + (globaltid+1)*cols + i-1) +
                *(plate1 + (globaltid+1)*cols + i+1) +
                *(plate1 + (globaltid+1-1)*cols + i) +
                *(plate1 + (globaltid+1+1)*cols + i));
                // track the largest change in this row
                // NOTE(review): abs() on doubles — presumably resolves to the
                // double overload in device code, but fabs() would be
                // unambiguous; verify.
                diff = abs(*(plate2 + (globaltid+1)*cols + i) - *(plate1 + (globaltid+1)*cols + i));
                if(maxDiff[globaltid]<diff)
                maxDiff[globaltid]=diff;
            }
        }else{
            // odd pass: plate2 -> plate1 (mirror of the even pass)
            for(int i=1;i<cols-1;i++){
                *(plate1 + (globaltid+1)*cols + i) = 0.25 * (*(plate2 + (globaltid+1)*cols + i-1) +
                *(plate2 + (globaltid+1)*cols + i+1) +
                *(plate2 + (globaltid+1-1)*cols + i) +
                *(plate2 + (globaltid+1+1)*cols + i));
                diff = abs(*(plate1 + (globaltid+1)*cols + i) - *(plate2 + (globaltid+1)*cols + i));
                if(maxDiff[globaltid]<diff)
                maxDiff[globaltid]=diff;
            }
        }
    }
}
// Jacobi iteration for steady-state heat distribution on a 2D plate.
// usage: prog rows cols top left right bottom epsilon
// Iterates the `steady` kernel until the largest per-cell change drops
// below epsilon, printing progress at power-of-two iteration counts.
int main(int argc, char *argv[])
{
    if (argc < 8){
        printf("Too Few Params\n");
        return -1;
    }
    int rows, *drows;
    int cols, *dcols;
    int power=0, iteration=0,*diteration;
    double top;
    double myleft;
    double myright;
    double bottom;
    double epsilon, *depsilon;
    double **hplate1;
    double **hplate2;
    double *dplate1;
    double *dplate2;
    double *maxDiff, *dmaxDiff, totalmaxDiff;
    int blocks, threadsPerBlock;

    rows = atoi(argv[1]);
    cols = atoi(argv[2]);
    top = atof(argv[3]);
    myleft = atof(argv[4]);
    myright = atof(argv[5]);
    bottom = atof(argv[6]);
    epsilon = atof(argv[7]);

    // Initial interior value: weighted average of the boundary temperatures.
    double start = ((cols-2)*top + (rows-1)*(myleft+myright) + bottom*cols) / ((cols-2) + 2*(rows-1) + cols);

    // Host plates: one contiguous data buffer plus an array of row pointers.
    hplate1 = (double **)malloc(rows*sizeof(double *));
    hplate1[0] = (double *)malloc(rows*cols*sizeof(double));
    hplate2 = (double **)malloc(rows*sizeof(double *));
    hplate2[0] = (double *)malloc(rows*cols*sizeof(double));
    maxDiff = (double *)malloc((rows-2)*sizeof(double));

    cudaMalloc(&dplate1,rows*cols*sizeof(double));
    cudaMalloc(&dplate2,rows*cols*sizeof(double));
    cudaMalloc(&dmaxDiff,(rows-2)*sizeof(double));
    cudaMalloc(&drows,sizeof(int));
    cudaMalloc(&dcols,sizeof(int));
    cudaMalloc(&depsilon,sizeof(double));
    cudaMalloc(&diteration,sizeof(int));

    // wire the row pointers into the contiguous buffers
    for(int i=0;i<rows;i++){
        hplate1[i] = (*hplate1 + i*cols);
        hplate2[i] = (*hplate2 + i*cols);
    }
    // Fixed boundary temperatures: left/right columns, then top/bottom rows.
    for(int r=0;r<rows-1;r++){
        hplate1[r][0] = myleft;
        hplate1[r][cols-1] = myright;
        hplate2[r][0] = myleft;
        hplate2[r][cols-1] = myright;
    }
    for(int c=0;c<cols;c++){
        if (c==0 || c==cols-1){
            hplate1[rows-1][c] = bottom;
            hplate2[rows-1][c] = bottom;
        }else{
            hplate1[0][c] = top;
            hplate1[rows-1][c] = bottom;
            hplate2[0][c] = top;
            hplate2[rows-1][c] = bottom;
        }
    }
    // interior cells start at the boundary-average value
    for(int r=1;r<rows-1;r++){
        for(int c=1;c<cols-1;c++){
            hplate1[r][c] = start;
            hplate2[r][c] = start;
        }
    }

    cudaMemcpy(dplate1, hplate1[0], rows*cols*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dplate2, hplate2[0], rows*cols*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(drows, &rows, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dcols, &cols, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(depsilon, &epsilon, sizeof(double), cudaMemcpyHostToDevice);

    // Launch geometry: one thread per interior row.
    blocks = (int)((rows-2)/1024) + 1;
    if ((rows-2)%blocks == 0)
        threadsPerBlock = (int)((rows-2)/blocks);
    else
        threadsPerBlock = (int)((rows-2)/blocks) + blocks;
    printf("blocks %d, threads per block %d\n\n", blocks, threadsPerBlock);

    do{
        cudaMemcpy(diteration, &iteration, sizeof(int), cudaMemcpyHostToDevice);
        // NOTE(review): *drows etc. dereference DEVICE pointers on the host,
        // which is undefined behavior; the kernel should take these by
        // value. Left unchanged because fixing it requires changing the
        // signature of steady() above in the same patch.
        steady<<< blocks, threadsPerBlock >>>(dplate1, dplate2, dmaxDiff, *drows, *dcols, *depsilon, *diteration);
        cudaDeviceSynchronize();
        cudaMemcpy(maxDiff, dmaxDiff, (rows-2)*sizeof(double), cudaMemcpyDeviceToHost);
        // host-side reduction of the per-row maxima
        totalmaxDiff = 0;
        for (int i=0;i<rows-2;i++){
            if(maxDiff[i]>totalmaxDiff)
                totalmaxDiff=maxDiff[i];
        }
        // report progress at power-of-two iteration counts
        if(iteration == (int)pow(2.0,power)){
            power+=1;
            printf("%d: %f\n",iteration,totalmaxDiff);
        }
        iteration+=1;
    }while(totalmaxDiff>epsilon);
    printf("%d: %4.4f\n",iteration-1,totalmaxDiff);

    // BUG FIX: the contiguous data buffers hplate1[0]/hplate2[0] were never
    // freed (only the row-pointer arrays were), leaking rows*cols doubles
    // each. Free the data buffers before the pointer arrays.
    free(hplate1[0]);
    free(hplate2[0]);
    free(hplate1);
    free(hplate2);
    free(maxDiff);
    cudaFree(dplate1);
    cudaFree(dplate2);
    cudaFree(dmaxDiff);
    cudaFree(drows);
    cudaFree(dcols);
    cudaFree(depsilon);
    cudaFree(diteration);
    printf("done\n");
    cudaDeviceReset();
    return 0;
}
|
21,893 | #include "includes.h"
// Element-wise widening conversion: odata[i] = (float)idata[i] for i < size.
__global__ void convertKernel(short* idata, float* odata, int size)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= size)
        return;
    odata[gid] = static_cast<float>(idata[gid]);
}
21,894 | #ifndef _DEV_SPH_KERNELS_CU_
#define _DEV_SPH_KERNELS_CU_
#define PI 3.141592653589793
#define iPI 0.318309886183791
__device__ float w(float u) {
    // Cubic B-spline (M4) SPH smoothing kernel over u = r/h, normalized by
    // 1/pi (iPI). Piecewise: clamped core for u < 0, inner cubic on [0,1),
    // outer cubic on [1,2), zero beyond 2. Expressions kept byte-identical
    // to preserve floating-point results; only the control flow differs.
    if (u < 0)
        return iPI;
    if (u < 1)
        return iPI * (1 - 1.5*u*u + 0.75*u*u*u);
    if (u < 2)
        return iPI*0.25 * (2-u)*(2-u)*(2-u);
    return 0;
}
// Radial shell-mass integrand of the kernel: surface-area factor 4*pi*u^2
// (12.5663706143592 = 4*pi) times the kernel value w(u).
__device__ float diw(float u) {
    return 12.5663706143592 * u*u * w(u);
}
// Radially integrated cubic-spline kernel: the fraction of total kernel
// mass enclosed within radius u (in units of h). Monotonic from 0 at u<=0
// to 1 for u >= 2; closed-form polynomial antiderivatives of diw on each
// piece.
__device__ float iw(float u) {
    if (u < 0) {
        return 0;
    } else if (u < 1) {
        float u3 = u*u*u;
        return 4 * (u3/3.0 - 0.3*u3*u*u + 0.125*u3*u3);
    } else if (u < 2) {
        float u3 = u*u*u;
        return 4* (-1.0/60 + 2.0/3*u3 - 0.75*u3*u + 0.3*u3*u*u - u3*u3/24.0);
    } else {
        return 1;
    }
}
// Tent-mapped integrated kernel: evaluates iw at alpha*(1 - |u-1|), i.e.
// ramps up over u in [0,1), back down over [1,2), and is 0/saturated
// outside (iw clamps its argument range).
__device__ float ig(float u) {
    float alpha = 4;
    // IMPROVEMENT: fabsf(u-1) replaces the original sqrt((u-1)*(u-1)).
    // Same |u-1| mathematically, but exact (no square/sqrt rounding) and
    // much cheaper than a square plus a square root.
    return iw(alpha * (1 - fabsf(u-1)));
    // /* equivalent to */
    // if (u < 1)
    //     return iw(alpha * u);
    // else if (u < 2)
    //     return iw(alpha * (2 - u));
    // else
    //     return 0;
}
// Derivative of the mapped integrated kernel with respect to the smoothing
// scale: chain rule applied to ig's tent argument, so the sign flips at
// u = 1 (rising branch vs falling branch). Zero outside (0, 2).
// NOTE(review): presumably d(ig)/dh up to a sign/scale convention — verify
// against the SPH formulation used by the callers.
__device__ float digdh(float u) {
    float alpha = 4;
    if (u <= 0)
        return 0;
    else if (u < 1)
        return alpha*u * diw(alpha * u);
    else if (u < 2)
        return -alpha*u * diw(alpha * (2 - u));
    else
        return 0;
}
// Radial derivative of the mapped integrated kernel, divided by u (common
// SPH convention so the gradient is dig(u) * r-vector). The u <= 0 guard
// also protects the divisions by u in both nonzero branches.
__device__ float dig(float u) {
    float alpha = 4;
    if (u <= 0)
        return 0;
    else if (u < 1)
        return alpha/u * diw(alpha * u);
    else if (u < 2)
        return -alpha/u * diw(alpha * (2 - u));
    else
        return 0;
}
// Radial derivative of the cubic-spline kernel w, divided by u in the
// outer branch (the /u appears only where u >= 1, so no division by zero).
// NOTE(review): unlike w(), there is no u < 0 guard here — the first branch
// also covers negative u; confirm callers never pass u < 0.
__device__ float dw(float u) {
    if (u < 1)
        return iPI * (-3 + 2.25 * u);
    else if (u < 2)
        return -iPI * 0.75 * (2-u)*(2-u)/u;
    else
        return 0;
}
// Derivative of the kernel with respect to the smoothing length h,
// expressed in terms of u = r/h (cubic polynomials per piece, zero beyond
// 2h). NOTE(review): sign/normalization convention inferred from the name —
// verify against the SPH derivation used by the callers.
__device__ float dwdh(float u) {
    if (u < 1)
        return iPI * (-3 + 7.5*u*u - 4.5*u*u*u);
    else if (u < 2)
        return iPI * (-6 + 12*u - 7.5*u*u + 1.5*u*u*u);
    else
        return 0;
}
// Derivative of the softened gravitational potential with respect to the
// smoothing length, for the cubic-spline softening (piecewise quintic in
// u = r/h, zero beyond 2h). NOTE(review): presumably follows the standard
// spline-softened gravity derivation — confirm coefficients against the
// reference formulation.
__device__ float dphidh(float u) {
    if (u < 1)
        return 1.4 - 2*u*u + 1.5*u*u*u*u - 0.6*u*u*u*u*u;
    else if (u < 2)
        return 1.6 - 4*u*u + 4*u*u*u - 1.5*u*u*u*u + 0.2*u*u*u*u*u;
    else
        return 0;
}
#endif
|
21,895 | #include "includes.h"
// Deliberately empty kernel: performs no work (usable only as a launch
// placeholder, e.g. for measuring kernel-launch overhead).
__global__ void abc()
{
}
21,896 | #include "includes.h"
// Blelloch work-efficient scan applied down a COLUMN of a 2D array: block
// `bid` scans the column whose elements are strided by gridDim.x. Each
// thread handles two elements; n is the number of elements scanned (must
// equal 2*blockDim.x for the tree indexing to cover the shared buffer).
// The final write converts the exclusive scan into an inclusive one by
// shifting results left and patching the last slot.
// Dynamic shared memory: n * sizeof(int), passed at launch.
__global__ void scan_y(int* g_odata, int* g_idata, int n) {
    extern __shared__ int temp[]; // allocated on invocation
    int thid = threadIdx.x;
    int bid = blockIdx.x;
    int bdim = blockDim.x;
    int gdim = gridDim.x;
    int offset = 1;
    // load two column elements per thread into shared memory
    temp[2 * thid] =
        g_idata[bid + 2 * thid * gdim]; // load input into shared memory
    temp[2 * thid + 1] = g_idata[bid + 2 * thid * gdim + gdim];
    for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree
        __syncthreads();
        if (thid < d) {
            int ai = offset * (2 * thid + 1) - 1;
            int bi = offset * (2 * thid + 2) - 1;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    if (thid == 0) {
        temp[n - 1] = 0;
    } // clear the last element
    for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan
        offset >>= 1;
        __syncthreads();
        if (thid < d) {
            int ai = offset * (2 * thid + 1) - 1;
            int bi = offset * (2 * thid + 2) - 1;
            int t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();
    // Shift the exclusive result left by one to produce an inclusive scan.
    g_odata[bid + 2 * thid * gdim] =
        temp[2 * thid + 1]; // write results to device memory
    int second_ind = 2 * thid + 2;
    if (second_ind == bdim * 2) {
        // last element: exclusive prefix + its own input value
        g_odata[bid + 2 * thid * gdim + gdim] =
            temp[2 * thid + 1] + g_idata[bid + 2 * thid * gdim + gdim];
    } else {
        g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2];
    }
}
21,897 | #include "includes.h"
#define LOG 0
/*
* An implementation of parallel reduction using nested kernel launches from
* CUDA kernels. This version adds optimizations on to the work in
* nestedReduce.cu.
*/
// Recursive Implementation of Interleaved Pair Approach
// Interleaved-pair parallel reduction: each block reduces its blockDim.x
// slice of g_idata in place (clobbering it) and writes the block's sum to
// g_odata[blockIdx.x]. The host must reduce the per-block partials.
__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;

    // in-place reduction in global memory
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        // BUG FIX: the original did `if (idx >= n) return;` before this
        // loop, so in a partial last block some threads exited before
        // reaching __syncthreads() — a divergent barrier, which is
        // undefined behavior. It also read idata[tid + stride] past n.
        // Guard the work instead so every thread reaches the barrier.
        if (idx < n && (tid % (2 * stride)) == 0 && idx + stride < n)
        {
            idata[tid] += idata[tid + stride];
        }
        // synchronize within threadblock
        __syncthreads();
    }

    // write result for this block to global mem (skip fully-empty blocks)
    if (tid == 0 && idx < n) g_odata[blockIdx.x] = idata[0];
}
21,898 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <thrust/sort.h>
/*
nvcc -O3 -arch=sm_30 -o cuda_monkey monkey.cu
*/
// Return the second-smallest DISTINCT value in arr[0..arr_size). If every
// element is equal (only one distinct value), return the smallest instead.
// Arrays with fewer than two elements print a diagnostic and return 0.
unsigned int print2Smallest(unsigned int *arr, unsigned int arr_size)
{
    unsigned int lowest = UINT_MAX;     /* current minimum */
    unsigned int nextLowest = UINT_MAX; /* runner-up, distinct from lowest */

    /* There should be atleast two elements */
    if (arr_size < 2)
    {
        printf(" Invalid Input ");
        return 0;
    }

    for (unsigned int idx = 0; idx < arr_size; idx++)
    {
        unsigned int v = arr[idx];
        if (v < lowest)
        {
            /* New minimum: the old minimum becomes the runner-up. */
            nextLowest = lowest;
            lowest = v;
        }
        else if (v < nextLowest && v != lowest)
        {
            /* Strictly between the two current bests: update runner-up. */
            nextLowest = v;
        }
    }

    /* UINT_MAX sentinel still in place => no second distinct value found. */
    return (nextLowest == UINT_MAX) ? lowest : nextLowest;
}
// Brute-force search kernel for the "monkey and the coconuts" puzzle:
// grid-stride loop where thread i tests candidate pile size (i + extra).
// Only odd candidates are simulated; each of `sailors` rounds removes
// `monkeys` coconuts and then a 1/sailors share, requiring divisibility at
// every step. A candidate surviving all rounds sets found[0] and records
// its offset in the_solutions[i] (host later picks the smallest).
// NOTE(review): found[0] is read at entry and written without any
// synchronization or atomic — benign here since it only transitions 0 -> 1
// and acts as an early-out for later launches, but several threads may
// record solutions in the same pass; verify the host-side minimum pick
// handles that (it does, via print2Smallest).
__global__
void monkey(unsigned long long int *coconuts, unsigned long long int extra, unsigned int *the_solutions, unsigned int *found, unsigned int sailors, unsigned int monkeys, unsigned int n)
{
    if (found[0] == 0){
        unsigned int j;
        // grid-stride loop over the candidate window [extra, extra + n)
        for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i<n; i+=blockDim.x*gridDim.x){
            coconuts[i] = i + extra;
            // only odd pile sizes can survive the division rounds below
            if (coconuts[i]%2!=0){
                // Go through the number of sailors
                for (j=0; j<sailors;j++){
                    // One for each monkey
                    coconuts[i] -= monkeys;
                    if (coconuts[i] % sailors != 0){
                        break;
                    }
                    coconuts[i] -= coconuts[i]/sailors;
                }
                // final check: the remainder must split evenly one last time
                if (coconuts[i] % sailors == 0){
                    found[0] = 1;
                    the_solutions[i] = i;
                }
            }
        }
    }
}
// Main method
// Host driver: for each sailor count 2..max_sailors, repeatedly launch the
// `monkey` search kernel over windows of SIZE candidates (advancing `extra`
// each miss) until a solution is found, then report the smallest solution.
int main()
{
    clock_t start, diff;
    // Size of array.
    unsigned int SIZE = pow(2,25);
    // CPU memory pointers
    unsigned long long int *h_coc, da_solu=0;
    unsigned int *h_found, *h_solutions;
    // GPU memory pointers
    unsigned long long int *d_coc, extra = 0;
    unsigned int *d_found, *d_solutions;
    // Allocate the space, CPU
    // NOTE(review): h_coc is allocated but never written to or copied —
    // dead allocation kept only for symmetry with d_coc.
    h_coc = (unsigned long long int *)malloc(SIZE*sizeof(unsigned long long int));
    //h_solutions = (unsigned int *)malloc(SIZE*sizeof(unsigned int));
    // pinned host memory: enables the fast D2H copy of the solutions array
    cudaHostAlloc((void**)&h_solutions, SIZE*sizeof(unsigned int), cudaHostAllocDefault);
    h_found = (unsigned int *)malloc(1*sizeof(unsigned int));
    // Choose to run on secondary GPU
    // NOTE(review): cudaSetDevice(1) AFTER cudaHostAlloc means the pinned
    // buffer was registered under device 0's context — confirm intended.
    cudaSetDevice(1);
    // Allocate the space, GPU
    cudaMalloc(&d_coc, SIZE*sizeof(unsigned long long int));
    cudaMalloc(&d_found, 1*sizeof(unsigned int));
    cudaMalloc(&d_solutions, SIZE*sizeof(unsigned int));
    //cudamemset can be used for initializing data (say, all zeros). 10 times faster than cudaMemcpy zero array because it is done on the gpu directly.
    cudaMemset(d_solutions, 0, SIZE*sizeof(unsigned int));
    unsigned int monkeys = 1;
    unsigned int max_sailors = 5;
    // Start timer
    start = clock();
    /*
    if (monkeys == even)
    solution will be even
    else
    solution will be odd
    somehow the kernel should then only search for even or odd solutions.
    At the moment we have implemented a very speed friendly way of sending the current num of nuts to search for. This should be done in a friendlier way.
    The workload will then be cut in half and it should thus take half as long
    */
    // Run the loop
    for (unsigned int sailors=2; sailors<max_sailors+1;sailors++){
        printf("Running %u sailors, %u monkeys", sailors, monkeys);
        // Send back that we want to look for a new solution
        h_found[0] = 0;
        cudaMemset(d_found, 0, 1*sizeof(unsigned int));
        // Run this loop until a solution is found for this sailor & monkey combination
        while (h_found[0] == 0){
            // Calling kernel (gridsize, blocksize)
            monkey<<<(SIZE + 255) / 256, 256>>>(d_coc, extra, d_solutions, d_found, sailors, monkeys, SIZE);
            // Copy back result (Device to Host).
            // (this blocking copy also synchronizes with the kernel above)
            cudaMemcpy(h_found, d_found, 1*sizeof(unsigned int), cudaMemcpyDeviceToHost);
            if (h_found[0] == 1){
                // Copy back result (Device to Host). This is pinned memory so +6 Gb/s
                cudaMemcpy(h_solutions, d_solutions, SIZE*sizeof(unsigned int), cudaMemcpyDeviceToHost);
                //cudaMemcpyAsync(h_solutions, d_solutions, SIZE*sizeof(unsigned int), cudaMemcpyDeviceToHost, 0);
                //cudaDeviceSynchronize();
                // Get second smallest in solutions array and recast
                // possibly do this on gpu as well
                // (slot 0 is always 0 from the memset, so the true answer is
                // the second-smallest entry)
                da_solu = (unsigned long long int) print2Smallest(h_solutions, SIZE);
                printf("\nSolution: %llu coconuts to begin with\n\n", da_solu+extra);
                if (sailors != max_sailors){
                    // Set solution array to zero again
                    cudaMemset(d_solutions, 0, SIZE*sizeof(unsigned int));
                }
            }
            else{
                // no hit in this window: advance to the next SIZE candidates
                extra +=SIZE;
                //printf(".");
            }
        }
        // Assume that result for 5 sailors is larger than for 4 sailors and so on..
        extra += da_solu;
    }
    // watch -n 0.5 "nvidia-settings -q GPUUtilization -q useddedicatedgpumemory"
    // Print execution time
    diff = clock() - start;
    double totalt = (double)diff/CLOCKS_PER_SEC;
    printf("Totalt: %f s\n", totalt);
    // Free the allocated memory
    free(h_coc);
    free(h_found);
    //free(h_solutions);
    // Pinned memory needs to be released with the command
    cudaFreeHost(h_solutions);
    // Free GPU memory
    cudaFree(d_coc);
    cudaFree(d_found);
    cudaFree(d_solutions);
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();
    return 0;
}
21,899 | #include <iostream>
#include <cuda_runtime_api.h>
// Enumerate all CUDA devices and print their key properties to stdout.
int main()
{
    int deviceCount;
    cudaDeviceProp deviceProp;

    // How many CUDA devices are installed in this PC.
    cudaGetDeviceCount(&deviceCount);
    std::cout << "Device count: " << deviceCount << "\n\n";

    for (int i = 0; i < deviceCount; i++)
    {
        // Query the properties of device i.
        cudaGetDeviceProperties(&deviceProp, i);

        // Print the device information.
        std::cout << "Device name: " << deviceProp.name << "\n";
        std::cout << "Total global memory: " << deviceProp.totalGlobalMem << "\n";
        std::cout << "Shared memory per block: " << deviceProp.sharedMemPerBlock << "\n";
        std::cout << "Registers per block: " << deviceProp.regsPerBlock << "\n";
        std::cout << "Warp size: " << deviceProp.warpSize << "\n";
        std::cout << "Memory pitch: " << deviceProp.memPitch << "\n";
        std::cout << "Max threads per block: " << deviceProp.maxThreadsPerBlock << "\n";
        std::cout << "Max threads dimensions: x = " << deviceProp.maxThreadsDim[0] << ", y = " << deviceProp.maxThreadsDim[1] << ", z = " << deviceProp.maxThreadsDim[2] << "\n";
        std::cout << "Max grid size: x = " << deviceProp.maxGridSize[0] << ", y = " << deviceProp.maxGridSize[1] << ", z = " << deviceProp.maxGridSize[2] << "\n";
        std::cout << "Clock rate: " << deviceProp.clockRate << "\n";
        std::cout << "Total constant memory: " << deviceProp.totalConstMem << "\n";
        std::cout << "Compute capability: " << deviceProp.major << " " << deviceProp.minor << "\n";
        std::cout << "Texture alignment: " << deviceProp.textureAlignment << "\n";
        std::cout << "Device overlap: " << deviceProp.deviceOverlap << "\n";
        std::cout << "Multiprocessor count: " << deviceProp.multiProcessorCount << "\n";
        // BUG FIX: operator<< binds tighter than ?:, so the original
        // expression printed the raw int and then evaluated-and-discarded
        // the ternary. Parenthesize so "true"/"false" is actually printed.
        std::cout << "Kernel execution timeout enabled: " << (deviceProp.kernelExecTimeoutEnabled ? "true \n" : "false \n");
    }
    return 0;
}
|
21,900 | #include "includes.h"
// Backward pass w.r.t. the filter of a depthwise graph/point convolution.
// Each block accumulates filter gradients into a shared-memory window
// covering gradFilter indices [startIdx, startIdx + sharedMemSize), then
// flushes it to global memory with atomicAdd (the kernel is expected to be
// launched once per window so the whole filter is covered).
// Loop structure: grid-stride over batches (blockIdx.x) and over the
// M*(C*r) output-point/output-channel pairs (blockIdx.y x threadIdx.x).
// NOTE(review): tensor layout assumptions inferred from the indexing —
// input is [B,N,C], gradOutput [B,M,C*r], nnIndex/binIndex [B,M,K],
// nnCount [B,M], gradFilter [F, C*r] flattened; TODO confirm with callers.
__global__ void depthwise_filter_backward(int B, int N, int M, int F, int C, int r, int K, const int* nnIndex, const int* nnCount, const int* binIndex, const float* input, const float* gradOutput, float* gradFilter, int sharedMemSize, int startIdx)
{
    extern __shared__ float gradPerBlock[]; // the gradient on each block
    // zero the block-local accumulation window
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();

    int endIdx = sharedMemSize+startIdx;

    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID

            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                // chain rule: dL/dW = dL/dy * x, averaged over neighbors
                float derFilt = gradOutput[i*M*C*r+j]*input[i*N*C+n*C+cin]/nnSize;

                int currIdx = f*C*r+cout;
                if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
                {
                    // shared-memory atomic: many (m,k) pairs can hit the
                    // same filter slot
                    atomicAdd(&gradPerBlock[currIdx-startIdx],derFilt);
                }
            }
        }
    }
    __syncthreads();

    // flush the block-local window into the global gradient
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.